1 /*
   2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24  
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.inline.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "code/compiledMethod.inline.hpp"
  29 #include "code/scopeDesc.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "interpreter/linkResolver.hpp"
  33 #include "interpreter/oopMapCache.hpp"
  34 #include "logging/log.hpp"
  35 #include "logging/logStream.hpp"
  36 #include "oops/access.inline.hpp"
  37 #include "oops/objArrayOop.inline.hpp"
  38 #include "runtime/continuation.hpp"
  39 #include "runtime/deoptimization.hpp"
  40 #include "runtime/interfaceSupport.inline.hpp"
  41 #include "runtime/frame.hpp"
  42 #include "runtime/javaCalls.hpp"
  43 #include "runtime/vframe_hp.hpp"
  44 #include "utilities/copy.hpp"
  45 #include "utilities/exceptions.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 // #undef ASSERT
  49 // #undef assert
  50 // #define assert(p, ...) 
  51 
  52 // TODO
  53 //
  54 // !!! Keep an eye out for deopt, and patch_pc
  55 //
  56 // Add:
  57 //  - method/nmethod metadata
  58 //  - scopes
  59 //  - Precise monitor detection
  60 //  - Exceptions in lazy-copy
  61 //  - stack walking in lazy-copy
  62 //  - compress interpreted frames
  63 //  - special native methods: Method.invoke, doPrivileged (+ method handles) 
//  - compiled->interpreted for serialization (look at scopeDesc)
  65 //  - caching h-stacks in thread stacks
  66 //
  67 // Things to compress in interpreted frames: return address, monitors, last_sp
  68 //
  69 // See: deoptimization.cpp, vframeArray.cpp, abstractInterpreter_x86.cpp
  70 
// Placeholder test native for java.lang.Continuation.foo(): just prints to the tty.
// Registered via CONT_methods/CONT_RegisterNativeMethods.
JVM_ENTRY(void, CONT_Foo(JNIEnv* env, jobject c)) {
    tty->print_cr("Hello, World!");
}
JVM_END
  75 
#define CC (char*)  /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)

// JNI registration table mapping java.lang.Continuation native methods to
// their C++ implementations (installed by CONT_RegisterNativeMethods).
static JNINativeMethod CONT_methods[] = {
    {CC"foo",           CC"()V",        FN_PTR(CONT_Foo)},
};
  82 
  83 void CONT_RegisterNativeMethods(JNIEnv *env, jclass cls) {
  84     int status = env->RegisterNatives(cls, CONT_methods, sizeof(CONT_methods)/sizeof(JNINativeMethod));
  85     guarantee(status == JNI_OK && !env->ExceptionOccurred(), "register java.lang.Continuation natives");
  86 }
  87 
  88 #define YIELD_SIG  "java.lang.Continuation.yield(Ljava/lang/ContinuationScope;)V"
  89 #define ENTER_SIG  "java.lang.Continuation.enter()V"
  90 #define ENTER0_SIG "java.lang.Continuation.enter0()V"
  91 #define RUN_SIG    "java.lang.Continuation.run()V"
  92 
  93 static void print_oop(void *p, oop obj, outputStream* st = tty);
  94 static void print_vframe(frame f, RegisterMap* map = NULL, outputStream* st = tty);
  95 static void print_frames(JavaThread* thread, outputStream* st = tty);
  96 #ifdef ASSERT
  97 static VMReg find_register_spilled_here(void* p, RegisterMap* map);
  98 #endif
  99 
 100 #define HOB (1ULL << 63)
 101 
// Per-frame header stored on the horizontal stack immediately below each
// frame's data (see hframe::meta()). Sizes are in bytes; the setters in
// hframe assert they fit in 16 bits.
struct HFrameMetadata {
  int num_oops;                     // number of oop slots this frame contributes (see write_stacks)
  unsigned short frame_size;        // size of the frame's data on the h-stack
  unsigned short uncompressed_size; // presumably the thawed size for compressed interpreted frames (see TODOs) — TODO confirm
};
 107 
 108 #define METADATA_SIZE sizeof(HFrameMetadata) // bytes
 109 
 110 #define ELEM_SIZE sizeof(jint) // stack is int[]
 111 static inline int to_index(size_t x) { return x >> 2; } // stack is int[]
 112 static inline int to_bytes(int x)    { return x << 2; } // stack is int[]
 113 
 114 static const unsigned char FLAG_LAST_FRAME_INTERPRETED = 1;
 115 
 116 
 117 // static inline HFrameMetadata* metadata(intptr_t* hsp) {
 118 //   return (HFrameMetadata*)((address)hsp - METADATA_SIZE);
 119 // }
 120 
 121 // static inline intptr_t* to_haddress(const void* base, const int index) {
 122 //   return (intptr_t*)((address)base + to_bytes(index));
 123 // }
 124 
 125 static inline int to_index(void* base, void* ptr) {
 126   return to_index((char*)ptr - (char*)base);
 127 }
 128 
 129 static oop get_continuation(JavaThread* thread) {
 130   return java_lang_Thread::continuation(thread->threadObj());
 131 }
 132 
 133 static void set_continuation(JavaThread* thread, oop cont) {
 134   java_lang_Thread::set_continuation(thread->threadObj(), cont);
 135 }
 136 
 137 class ContMirror;
 138 
 139 // Represents a stack frame on the horizontal stack, analogous to the frame class, for vertical-stack frames.
 140 class hframe {
 141 private:
 142   bool _write;
 143   int _sp;
 144   long _fp;
 145   address _pc;
 146   bool _is_interpreted;
 147   CodeBlob* _cb;
 148   int _length;
 149 
 150   friend class ContMirror;
 151 private:
 152   inline HFrameMetadata* meta(ContMirror& cont);
 153   inline intptr_t* real_fp(ContMirror& cont);
 154   inline int real_fp_index(ContMirror& cont);
 155   inline int link_index(ContMirror& cont);
 156   inline address* return_pc_address(ContMirror& cont);
 157 
 158 public:
 159   hframe() : _write(false), _length(0), _sp(-1), _fp(0), _pc(NULL), _is_interpreted(true), _cb(NULL) {}
 160   hframe(const hframe& hf) : _write(hf._write), _length(hf._length), 
 161                              _sp(hf._sp), _fp(hf._fp), _pc(hf._pc), _is_interpreted(hf._is_interpreted), _cb(hf._cb) {}
 162   
 163   hframe(int sp, long fp, address pc, bool write, int length)
 164     : _write(write), _length(length), _sp(sp), _fp(fp), _pc(pc), _is_interpreted(Interpreter::contains(pc)) { 
 165       _cb = NULL;
 166       assert (write  || length > 0, "");
 167       assert (!write || length == 0, "");
 168     }
 169   hframe(int sp, long fp, address pc, CodeBlob* cb, bool is_interpreted, bool write, int length) 
 170     : _write(write), _length(length), _sp(sp), _fp(fp), _pc(pc), _cb(cb), _is_interpreted(is_interpreted) {
 171       assert (write  || length > 0, "");
 172       assert (!write || length == 0, "");
 173     }
 174   hframe(int sp, long fp, address pc, bool is_interpreted, bool write, int length) 
 175     : _write(write), _length(length), _sp(sp), _fp(fp), _pc(pc), _is_interpreted(is_interpreted) { 
 176       _cb = NULL;
 177       assert (write  || length > 0, "");
 178       assert (!write || length == 0, "");
 179     }
 180 
 181   bool operator==(const hframe& other) { return _write == other._write && _sp == other._sp && _fp == other._fp && _pc == other._pc; }
 182   bool is_empty() { return _pc == NULL && _sp < 0; }
 183 
 184   inline bool is_interpreted_frame() { return _is_interpreted; }
 185   inline int       sp() { return _sp; }
 186   inline long      fp() { return _fp; }
 187   inline address   pc() { return _pc; }
 188   inline CodeBlob* cb();
 189 
 190   inline bool write() { return _write; }
 191 
 192   size_t size(ContMirror& cont)              { return meta(cont)->frame_size; }
 193   size_t uncompressed_size(ContMirror& cont) { return meta(cont)->uncompressed_size; }
 194   int num_oops(ContMirror& cont)             { return meta(cont)->num_oops; }
 195 
 196   void set_size(ContMirror& cont, size_t size)              { assert(size < 0xffff, ""); meta(cont)->frame_size = size; }
 197   void set_num_oops(ContMirror& cont, int num)              { assert(num  < 0xffff, ""); meta(cont)->num_oops = num; }
 198   void set_uncompressed_size(ContMirror& cont, size_t size) { assert(size < 0xffff, ""); meta(cont)->uncompressed_size = size; }
 199   
 200   // the link is an offset from the real fp to the sender's fp IFF the sender is interpreted
 201   // otherwise, it's the contents of the rbp register
 202   inline long* link_address(ContMirror& cont);
 203   inline long link(ContMirror& cont)         { return *link_address(cont); }
 204   inline address return_pc(ContMirror& cont) { return *return_pc_address(cont); }
 205 
 206   hframe sender(ContMirror& cont);
 207 
 208   inline void patch_link(ContMirror& cont, long value) { *link_address(cont) = value; }
 209   inline void patch_link_relative(ContMirror& cont, intptr_t* fp);
 210   inline void patch_callee(ContMirror& cont, hframe& sender);
 211   
 212   inline void patch_return_pc(ContMirror& cont, address value) { *return_pc_address(cont) = value; }
 213   inline void patch_real_fp_offset(ContMirror& cont, int offset, intptr_t value) { *(link_address(cont) + offset) = value; }
 214   inline intptr_t* get_real_fp_offset(ContMirror& cont, int offset) { return (intptr_t*)*(link_address(cont) + offset); }
 215   inline void patch_real_fp_offset_relative(ContMirror& cont, int offset, intptr_t* value);
 216 
 217   bool is_bottom(ContMirror& cont);
 218 
 219   inline intptr_t* index_address(ContMirror& cont, int i);
 220 
 221   void print_on(ContMirror& cont, outputStream* st);
 222   void print(ContMirror& cont) { print_on(cont, tty); }
 223   void print_on(outputStream* st);
 224   void print() { print_on(tty); }
 225 };
 226 
 227 // freeze result
 228 enum res_freeze {
 229   freeze_ok = 0,
 230   freeze_pinned_native,
 231   freeze_pinned_monitor = 2
 232 };
 233 
// Records the location of an oop slot found while freezing; 'loc' holds the
// slot's address, 'narrow' says whether it holds a narrowOop (see the
// RootAccess loads in ContMirror::write_stacks).
struct oopLoc {
  bool narrow  : 1;        // slot holds a narrowOop rather than a full oop
  unsigned long loc : 63;  // address of the slot (fits in 63 bits)
};
 238 
 239 // Mirrors the Java continuation objects.
// Contents are read from the Java object at the entry points of this module, and written at exits or intermediate calls into Java
 241 class ContMirror {
 242 private:
 243   JavaThread* const _thread;
 244   /*const*/ oop _cont;
 245   intptr_t* _entrySP;
 246   intptr_t* _entryFP;
 247   address _entryPC;
 248 
 249   int  _sp;
 250   long _fp;
 251   address _pc;
 252 
 253   typeArrayOop _stack;
 254   int _stack_length;
 255   int* _hstack;
 256 
 257   int* _write_stack;
 258   int _wstack_length;
 259   int _wsp; // traditional indexing. increases, equals number of cells written
 260 
 261   size_t _max_size;
 262 
 263   int _ref_sp;
 264   objArrayOop  _ref_stack;
 265   GrowableArray<oopLoc>* _oops;
 266 
 267   unsigned char _flags;
 268 
 269   short _num_interpreted_frames;
 270   short _num_frames;
 271 
 272   ContMirror(const ContMirror& cont); // no copy constructor
 273 
 274   int* stack() { return _hstack; }
 275 
 276   void allocate_stacks(int size, int oops, int frames);
 277   inline intptr_t* write_stack_address(int i);
 278   inline int write_stack_index(void* p);
 279   inline int fix_write_index_after_write(int index);
 280   inline int fix_index_after_write(int index, int old_length, int new_length);
 281 
 282 public:
 283   ContMirror(JavaThread* thread, oop cont);
 284 
 285   DEBUG_ONLY(intptr_t hash() { return _cont->identity_hash(); })
 286   void read();
 287   void write();
 288 
 289   intptr_t* entrySP() { return _entrySP; }
 290   intptr_t* entryFP() { return _entryFP; }
 291   address   entryPC() { return _entryPC; }
 292 
 293   void set_entrySP(intptr_t* sp) { _entrySP = sp; }
 294   void set_entryFP(intptr_t* fp) { _entryFP = fp; }
 295   void set_entryPC(address pc)   {
 296     log_trace(jvmcont)("set_entryPC %p", pc);
 297     _entryPC = pc; 
 298   }
 299 
 300   int sp()                 { return _sp; }
 301   long fp()                { return _fp; }
 302   address pc()             { return _pc; }
 303 
 304   void set_sp(int sp)      { _sp = sp;   }
 305   void set_fp(long fp)     { _fp = fp;   }
 306   void set_pc(address pc)  { _pc = pc; set_flag(FLAG_LAST_FRAME_INTERPRETED, Interpreter::contains(pc));  }
 307 
 308   bool is_flag(unsigned char flag) { return (_flags & flag) != 0; }
 309   void set_flag(unsigned char flag, bool v) { _flags = (v ? _flags |= flag : _flags &= ~v); }
 310 
 311   int stack_length() { return _stack_length; }
 312 
 313   JavaThread* thread() { return _thread; }
 314 
 315   inline bool in_writestack(void *p) { return (_write_stack != NULL && p >= _write_stack && p < (_write_stack + _wstack_length)); }
 316   inline bool in_hstack(void *p) { return (_hstack != NULL && p >= _hstack && p < (_hstack + _stack_length)); }
 317 
 318   void copy_to_stack(void* from, void* to, int size);
 319   void copy_from_stack(void* from, void* to, int size);
 320 
 321   objArrayOop  refStack(int size);
 322   objArrayOop refStack() { return _ref_stack; }
 323   int refSP() { return _ref_sp; }
 324   void set_refSP(int refSP) { log_trace(jvmcont)("set_refSP: %d", refSP); _ref_sp = refSP; }
 325 
 326   typeArrayOop stack(int size);
 327   inline bool in_stack(void *p) { return in_hstack(p) || in_writestack(p); }
 328   inline int stack_index(void* p);
 329   inline intptr_t* stack_address(int i);
 330   inline intptr_t* stack_address(int i, bool write);
 331 
 332   void call_pinned(res_freeze res, frame& f);
 333 
 334   void update_register_map(RegisterMap& map);
 335   bool is_map_at_top(RegisterMap& map);
 336 
 337   bool is_empty();
 338   inline hframe new_hframe(intptr_t* hsp, intptr_t* hfp, address pc, CodeBlob* cb, bool is_interpreted);
 339   hframe last_frame();
 340   inline void set_last_frame(hframe& f);
 341 
 342   void init_write_arrays(int size);
 343   address freeze_target();
 344   inline hframe fix_hframe_afer_write(hframe& hf);
 345   void write_stacks();
 346 
 347   inline void add_oop_location(oop* p);
 348   inline void add_oop_location(narrowOop* p);
 349 
 350   inline oop obj_at(int i);
 351   int num_oops();
 352 
 353   inline size_t max_size() { return _max_size; }
 354   inline void add_size(size_t s) { log_trace(jvmcont)("add max_size: %lu s: %lu", _max_size + s, s); 
 355                                    _max_size += s; }
 356   inline void sub_size(size_t s) { log_trace(jvmcont)("sub max_size: %lu s: %lu", _max_size - s, s);
 357                                    assert(s <= _max_size, "s: %lu max_size: %lu", s, _max_size); 
 358                                    _max_size -= s; }
 359   inline short num_interpreted_frames() { return _num_interpreted_frames; }
 360   inline void inc_num_interpreted_frames() { _num_interpreted_frames++; }
 361   inline void dec_num_interpreted_frames() { _num_interpreted_frames--; }
 362 
 363   inline short num_frames() { return _num_frames; }
 364   inline void inc_num_frames() { _num_frames++; }
 365   inline void dec_num_frames() { _num_frames--; }
 366 
 367   void print_hframes(outputStream* st = tty);
 368 };
 369 
 370 void hframe::print_on(outputStream* st) {
 371   if (is_empty()) {
 372     st->print_cr("\tempty");
 373   } else if (_is_interpreted) {
 374     st->print_cr("\tInterpreted sp: %d fp: %ld pc: %p", _sp, _fp, _pc);
 375   } else {
 376     st->print_cr("\tCompiled sp: %d fp: 0x%lx pc: %p", _sp, _fp, _pc);
 377   }
 378 }
 379 
// Detailed debug dump of this frame: its h-stack metadata plus, for
// interpreted frames, the interpreter-frame slots at their canonical offsets.
void hframe::print_on(ContMirror& cont, outputStream* st) {
  print_on(st);
  if (is_empty())
    return;
  
  st->print_cr("\tMetadata size: %d num_oops: %d", meta(cont)->frame_size, meta(cont)->num_oops);

  if (_is_interpreted) {
    // interpreted: _fp is an h-stack index; resolve it to an address first
    intptr_t* fp = index_address(cont, _fp);
    Method** method_addr = (Method**)(fp + frame::interpreter_frame_method_offset);
    Method* method = *method_addr;
    st->print_cr("\tmethod: %p (at %p)", method, method_addr);
    st->print("\tmethod: "); method->print_short_name(st); st->cr();

    st->print_cr("\tissp: %ld",             *(long*) (fp + frame::interpreter_frame_sender_sp_offset));
    st->print_cr("\tlast_sp: %ld",          *(long*) (fp + frame::interpreter_frame_last_sp_offset));
    st->print_cr("\tinitial_sp: %ld",       *(long*) (fp + frame::interpreter_frame_initial_sp_offset));
    // st->print_cr("\tmon_block_top: %ld",    *(long*) (fp + frame::interpreter_frame_monitor_block_top_offset));
    // st->print_cr("\tmon_block_bottom: %ld", *(long*) (fp + frame::interpreter_frame_monitor_block_bottom_offset));
    st->print_cr("\tlocals: %ld",           *(long*) (fp + frame::interpreter_frame_locals_offset));
    st->print_cr("\tcache: %p",             *(void**)(fp + frame::interpreter_frame_cache_offset));
    st->print_cr("\tbcp: %p",               *(void**)(fp + frame::interpreter_frame_bcp_offset));
    st->print_cr("\tbci: %d",               method->bci_from(*(address*)(fp + frame::interpreter_frame_bcp_offset)));
    st->print_cr("\tmirror: %p",            *(void**)(fp + frame::interpreter_frame_mirror_offset));
    // st->print("\tmirror: "); os::print_location(st, *(intptr_t*)(fp + frame::interpreter_frame_mirror_offset), true);
  } else {
    st->print_cr("\tcb: %p", cb());
    if (_cb != NULL) {
      st->print("\tcb: "); _cb->print_value_on(st); st->cr();
      st->print_cr("\tcb.frame_size: %d", _cb->frame_size());
    }
  }
  st->print_cr("\tlink: 0x%lx %ld (at: %p)", link(cont), link(cont), link_address(cont));
  st->print_cr("\treturn_pc: %p (at %p)", return_pc(cont), return_pc_address(cont));

  // disabled byte-by-byte hex dump of the frame data; flip to true when needed
  if (false) {
    address sp = (address)index_address(cont, _sp);
    st->print_cr("--data--");
    int fsize = meta(cont)->frame_size;
    for(int i=0; i < fsize; i++)
      st->print_cr("%p: %x", (sp + i), *(sp + i));
    st->print_cr("--end data--");
  }
}
 424 
// Resolves a stack index to an address in the h-stack or the write stack,
// depending on which stack this frame lives in (_write).
inline intptr_t* hframe::index_address(ContMirror& cont, int i) { 
  assert (_length == (_write ? 0 : cont.stack_length()), "write: %d length: %d cont.stack_length: %d", _write, _length, cont.stack_length());
  return (intptr_t*)cont.stack_address(i, _write); 
}
 429 
// The frame's metadata header, stored METADATA_SIZE bytes below its sp.
inline HFrameMetadata* hframe::meta(ContMirror& cont) { 
  return (HFrameMetadata*)index_address(cont, _sp - to_index(METADATA_SIZE));
}
 433 
 434 bool hframe::is_bottom(ContMirror& cont) {
 435   assert (!_write, "");
 436   return _sp + to_index(size(cont) + METADATA_SIZE) >= cont.stack_length();
 437 }
 438 
 439 inline CodeBlob* hframe::cb() {
 440   if (_cb == NULL && !_is_interpreted) // compute lazily
 441     _cb = CodeCache::find_blob(_pc);
 442   return _cb;
 443 }
 444 
 445 inline intptr_t* hframe::real_fp(ContMirror& cont) {
 446   assert (!_is_interpreted, "interpreted");
 447   return index_address(cont, _sp) + cb()->frame_size();
 448 }
 449 
 450 inline int hframe::real_fp_index(ContMirror& cont) {
 451   assert (!_is_interpreted, "interpreted");
 452   assert (_length == cont.stack_length(), "");
 453   return _sp + to_index(cb()->frame_size() * sizeof(intptr_t));
 454 }
 455 
// Address of the saved-fp (link) slot: fp[link_offset] for interpreted
// frames, or the slot just below the real fp for compiled frames.
inline long* hframe::link_address(ContMirror& cont) {
  return _is_interpreted 
    ? (long*)&index_address(cont, _fp)[frame::link_offset]
    : (long*)(real_fp(cont) - frame::sender_sp_offset); // x86-specific
}
 461 
// H-stack index of the link slot (index form of link_address; note
// interpreted frames' link slot sits at fp itself since link_offset is 0 on x86 — TODO confirm).
inline int hframe::link_index(ContMirror& cont) {
  return _is_interpreted ? _fp : (real_fp_index(cont) - to_index(frame::sender_sp_offset * sizeof(intptr_t*))); // x86-specific
}
 465 
// Address of the slot holding this frame's return pc (the sender's pc):
// fp[return_addr_offset] for interpreted frames, the word below the real fp
// for compiled frames.
inline address* hframe::return_pc_address(ContMirror& cont) {
  return _is_interpreted
    ? (address*)&index_address(cont, _fp)[frame::return_addr_offset]
    : (address*)(real_fp(cont) - 1); // x86-specific
}
 471 
// Stores 'value' at the given offset from the frame's fp as an index offset
// relative to the slot's own address, so it stays valid if the h-stack is
// reallocated/moved.
inline void hframe::patch_real_fp_offset_relative(ContMirror& cont, int offset, intptr_t* value) {
  long* la = (long*)((_is_interpreted ? index_address(cont, _fp) : real_fp(cont)) + offset);
  *la = to_index((address)value - (address)la);
  log_trace(jvmcont)("patched relative offset: %d value: %p", offset, value);
}
 477 
// Stores the link as an index offset relative to the link slot itself
// (used when the sender is interpreted; see hframe::sender, which adds
// link_index back to recover the absolute index).
inline void hframe::patch_link_relative(ContMirror& cont, intptr_t* fp) {
  long* la = link_address(cont);
  *la = to_index((address)fp - (address)la);
  log_trace(jvmcont)("patched link: %ld", *la);
}
 483 
// Patches this (callee) frame's link to point at 'sender': relative when the
// sender is interpreted (its fp is an index that may move on reallocation),
// absolute otherwise. Also fixes this frame's sender-sp slot if interpreted.
inline void hframe::patch_callee(ContMirror& cont, hframe& sender) {
  assert (_write == sender._write, "");
  if (sender.is_interpreted_frame()) {
    patch_link_relative(cont, sender.link_address(cont));
  } else {
    patch_link(cont, sender.fp());
  }
  if (is_interpreted_frame()) {
    patch_real_fp_offset_relative(cont, frame::interpreter_frame_sender_sp_offset, index_address(cont, sender.sp()));
  }
}
 495 
// Returns the caller of this frame on the h-stack, or an empty hframe if
// this frame is the bottom-most one.
hframe hframe::sender(ContMirror& cont) {
  assert (!_write, "");
  assert (_length == cont.stack_length(), "");
  address sender_pc = return_pc(cont);
  bool is_sender_interpreted = Interpreter::contains(sender_pc);
  // the sender's frame sits directly above this frame's data + metadata
  int sender_sp = _sp + to_index(size(cont) + METADATA_SIZE);
  long sender_fp = link(cont);
  // log_trace(jvmcont)("hframe::sender sender_fp0: %ld", sender_fp);
  // if (log_is_enabled(Trace, jvmcont)) print_on(cont, tty);
  if (is_sender_interpreted) {
    // interpreted senders store a relative link (see patch_link_relative);
    // add the link slot's own index to recover the absolute fp index
    sender_fp += link_index(cont); 
    // log_trace(jvmcont)("hframe::sender real_fp: %d sender_fp: %ld", link_index(cont), sender_fp);
  }
  if (sender_sp >= cont.stack_length())
    return hframe();
  return hframe(sender_sp, sender_fp, sender_pc, is_sender_interpreted, _write, _length);
}
 513 
 514 ContMirror::ContMirror(JavaThread* thread, oop cont)
 515  : _thread(thread) {
 516   _cont = cont;
 517   _stack     = NULL;
 518   _hstack    = NULL;
 519   _ref_stack = NULL;
 520   _stack_length = 0;
 521   _oops = NULL;
 522   _write_stack = NULL;
 523   _wstack_length = 0;
 524   _wsp = 0;
 525   _num_frames = 0;
 526   _num_interpreted_frames = 0;
 527   _flags = 0;
 528 }
 529 
// Loads the continuation's state from the Java mirror object into this view.
// Must be called before using the accessors; paired with write(), which
// stores the (possibly modified) state back.
void ContMirror::read() {
  log_trace(jvmcont)("Reading continuation object:");

  _entrySP = (intptr_t*) java_lang_Continuation::entrySP(_cont);
  _entryFP = NULL; // not persisted in the Java object (see write())
  _entryPC = (address) java_lang_Continuation::entryPC(_cont);
  log_trace(jvmcont)("set_entryPC Z %p", _entryPC);
  log_trace(jvmcont)("\tentrySP: %p entryFP: %p entryPC: %p", _entrySP, _entryFP, _entryPC);

  _sp = java_lang_Continuation::sp(_cont);
  _fp = java_lang_Continuation::fp(_cont);
  _pc = (address)java_lang_Continuation::pc(_cont);
  log_trace(jvmcont)("\tsp: %d fp: %ld 0x%lx pc: %p", _sp, _fp, _fp, _pc);

  // the h-stack may not have been allocated yet; keep the cached length/base
  // consistent (0/NULL) in that case
  _stack = java_lang_Continuation::stack(_cont);
  if (_stack != NULL) {
    _stack_length = _stack->length();
    _hstack = (int*)_stack->base(T_INT);
  } else {
    _stack_length = 0;
    _hstack = NULL;
  }
  _max_size = java_lang_Continuation::maxSize(_cont);
  log_trace(jvmcont)("\tstack: %p hstack: %p, stack_length: %d max_size: %lu", (oopDesc*)_stack, _hstack, _stack_length, _max_size);

  _ref_stack = java_lang_Continuation::refStack(_cont);
  _ref_sp = java_lang_Continuation::refSP(_cont);
  log_trace(jvmcont)("\tref_stack: %p ref_sp: %d", (oopDesc*)_ref_stack, _ref_sp);

  _flags = java_lang_Continuation::flags(_cont);
  log_trace(jvmcont)("\tflags: %d", _flags);

  _num_frames = java_lang_Continuation::numFrames(_cont);
  log_trace(jvmcont)("\tnum_frames: %d", _num_frames);

  _num_interpreted_frames = java_lang_Continuation::numInterpretedFrames(_cont);
  log_trace(jvmcont)("\tnum_interpreted_frames: %d", _num_interpreted_frames);
}
 568 
// Stores this view's state back into the Java mirror object, flushing the
// temporary write stack and collected oops first (write_stacks). Inverse of read().
void ContMirror::write() {
  log_trace(jvmcont)("Writing continuation object:");
  
  log_trace(jvmcont)("\tsp: %d fp: %ld 0x%lx pc: %p", _sp, _fp, _fp, _pc);
  java_lang_Continuation::set_sp(_cont, _sp);
  java_lang_Continuation::set_fp(_cont, _fp);
  java_lang_Continuation::set_pc(_cont, _pc);

  log_trace(jvmcont)("WRITE set_entryPC: %p", _entryPC);
  java_lang_Continuation::set_entrySP(_cont, _entrySP);
  // java_lang_Continuation::set_entryFP(_cont, _entryFP);
  java_lang_Continuation::set_entryPC(_cont, _entryPC);

  // may call into Java to grow the stacks, and updates _ref_sp
  write_stacks();

  log_trace(jvmcont)("\tmax_size: %lu", _max_size);
  java_lang_Continuation::set_maxSize(_cont, (jint)_max_size);

  log_trace(jvmcont)("\tref_sp: %d", _ref_sp);
  java_lang_Continuation::set_refSP(_cont, _ref_sp);

  java_lang_Continuation::set_flags(_cont, _flags);
  log_trace(jvmcont)("\tflags: %d", _flags);

  java_lang_Continuation::set_numFrames(_cont, _num_frames);
  log_trace(jvmcont)("\tnum_frames: %d", _num_frames);

  java_lang_Continuation::set_numInterpretedFrames(_cont, _num_interpreted_frames);
  log_trace(jvmcont)("\tnum_interpreted_frames: %d", _num_interpreted_frames);

  log_trace(jvmcont)("\tend write");
}
 601 
 602 bool ContMirror::is_empty() {
 603   return _sp < 0 || _sp >= _stack->length();
 604 }
 605 
 606 hframe ContMirror::last_frame() {
 607   return is_empty() ? hframe() : hframe(_sp, _fp, _pc, false, _stack_length);
 608 }
 609 
 610 inline void ContMirror::set_last_frame(hframe& f) {
 611   assert (f._length = _stack_length, "");
 612   set_sp(f.sp()); set_fp(f.fp()); set_pc(f.pc()); 
 613   log_trace(jvmcont)("set_last_frame cont sp: %d fp: 0x%lx pc: %p", sp(), fp(), pc());
 614   if (log_is_enabled(Trace, jvmcont)) f.print_on(*this, tty);
 615   if (is_empty()) {
 616     set_fp(0);
 617     set_pc(NULL);
 618   }
 619 }
 620 
 621 inline int ContMirror::stack_index(void* p) { 
 622   int i = to_index(stack(), p); 
 623   assert (i >= 0 && i < stack_length(), "i: %d length: %d", i, stack_length());
 624   return i; 
 625 }
 626 
 627 inline intptr_t* ContMirror::stack_address(int i) { 
 628   assert (i >= 0 && i < stack_length(), "i: %d length: %d", i, stack_length());
 629   return (intptr_t*)&stack()[i]; 
 630 }
 631 
 632 inline int ContMirror::write_stack_index(void* p) { 
 633   assert (_write_stack != NULL, "");
 634   int i = to_index(_write_stack, p); 
 635   assert (i >= 0 && i < _wstack_length, "i: %d length: %d", i, _wstack_length);
 636   return i; 
 637 }
 638 
 639 inline intptr_t* ContMirror::write_stack_address(int i) { 
 640   assert (_write_stack != NULL, "");
 641   assert (i >= 0 && i < _wstack_length, "i: %d length: %d", i, _wstack_length);
 642   return (intptr_t*)&_write_stack[i]; 
 643 }
 644 
 645 inline intptr_t* ContMirror::stack_address(int i, bool write) {
 646   return write ? write_stack_address(i) : stack_address(i);
 647 }
 648 
// Copies 'size' bytes of a frame from the vertical (thread) stack into the
// write stack at 'to', and advances _wsp past the copied data.
void ContMirror::copy_to_stack(void* from, void* to, int size) {
  log_trace(jvmcont)("Copying from v: %p - %p (%d bytes)", from, (address)from + size, size);
  log_trace(jvmcont)("Copying to h: %p - %p (%d - %d)", to, (address)to + size, to_index(_write_stack, to), to_index(_write_stack, (address)to + size));

  assert (size > 0, "size: %d", size);
  assert (write_stack_index(to) >= 0, "");
  assert (to_index(_write_stack, (address)to + size) <= _wstack_length, "");

  // this assertion is just to check whether the copying happens as intended, but not otherwise required for this method.
  assert (write_stack_index(to) == _wsp + to_index(METADATA_SIZE), "to: %d wsp: %d", write_stack_index(to), _wsp);

  Copy::conjoint_memory_atomic(from, to, size);
  _wsp = to_index(_write_stack, (address)to + size);
}
 663 
// Copies 'size' bytes of a frozen frame from the h-stack back onto the
// vertical (thread) stack (thawing).
void ContMirror::copy_from_stack(void* from, void* to, int size) {
  log_trace(jvmcont)("Copying from h: %p - %p (%d - %d)", from, (address)from + size, to_index(stack(), from), to_index(stack(), (address)from + size));
  log_trace(jvmcont)("Copying to v: %p - %p (%d bytes)", to, (address)to + size, size);

  assert (size > 0, "size: %d", size);
  assert (stack_index(from) >= 0, "");
  assert (to_index(stack(), (address)from + size) <= stack_length(), "");

  Copy::conjoint_memory_atomic(from, to, size);
}
 674 
 675 void ContMirror::allocate_stacks(int size, int oops, int frames) {
 676   bool need_allocation = false;
 677   log_trace(jvmcont)("stack(int): size: %d size(int): %d sp: %d", size, to_index(size), _sp);
 678   if (_stack == NULL || to_index(size) >= _sp - to_index(METADATA_SIZE)) need_allocation = true;
 679   
 680   log_trace(jvmcont)("num_oops: %d ref_sp: %d", oops, _ref_sp);
 681   if (_ref_stack == NULL || oops > _ref_sp) need_allocation = true;
 682   if (!need_allocation)
 683     return;
 684 
 685   log_trace(jvmcont)("allocating stacks");
 686 
 687   assert(_sp == java_lang_Continuation::sp(_cont), "");
 688   assert(_fp == java_lang_Continuation::fp(_cont), "");
 689   assert(_pc == java_lang_Continuation::pc(_cont), "");
 690 
 691   int old_stack_length = _stack_length;
 692 
 693   HandleMark hm(_thread);
 694   Handle conth(_thread, _cont);
 695   JavaCallArguments args;
 696   args.push_oop(conth);
 697   args.push_int(size);
 698   args.push_int(oops);
 699   args.push_int(frames);
 700   JavaValue result(T_VOID);
 701   JavaCalls::call_virtual(&result, SystemDictionary::Continuation_klass(), vmSymbols::getStacks_name(), vmSymbols::continuationGetStacks_signature(), &args, _thread);
 702   _cont = conth();  // reload oop after java call
 703 
 704   _stack = java_lang_Continuation::stack(_cont);
 705   _stack_length = _stack->length();
 706   _hstack = (int*)_stack->base(T_INT);
 707 
 708   _sp = (old_stack_length <= 0 || _sp < 0) ? _stack_length + to_index(METADATA_SIZE) : _stack_length - (old_stack_length - _sp);
 709   if (Interpreter::contains(_pc)) // only interpreter frames use relative (index) fp
 710     _fp = _stack_length - (old_stack_length - _fp);
 711 
 712   // These assertions aren't important, as we'll overwrite the Java-computed ones, but they're just to test that the Java computation is OK.
 713   // assert(_sp == java_lang_Continuation::sp(_cont), "");
 714   // assert(_fp == java_lang_Continuation::fp(_cont), "");
 715   // assert(_pc == java_lang_Continuation::pc(_cont), "");
 716 
 717   log_trace(jvmcont)("sp: %d stack_length: %d", _sp, _stack_length);
 718 
 719   _ref_stack = java_lang_Continuation::refStack(_cont);
 720   _ref_sp    = java_lang_Continuation::refSP(_cont);
 721 
 722   log_trace(jvmcont)("ref_sp: %d refStack length: %d", _ref_sp, _ref_stack->length());
 723 
 724   if (!thread()->has_pending_exception()) return;
 725 
 726   assert (to_bytes(_stack_length) >= size, "sanity check: stack_size: %d size: %d", to_bytes(_stack_length), size);
 727   assert (to_bytes(_sp) - (int)METADATA_SIZE >= size, "sanity check");
 728   assert (to_bytes(_ref_sp) >= oops, "oops: %d ref_sp: %d refStack length: %d", oops, _ref_sp, _ref_stack->length());
 729 }
 730 
// Flushes the temporary write stack into the h-stack (growing it via
// allocate_stacks if needed) and stores the collected oop references into the
// ref stack. No-op if nothing was frozen since the last flush.
void ContMirror::write_stacks() {
  if (_write_stack == NULL) {
    assert(_oops == NULL, "");
    return;
  }

  log_trace(jvmcont)("Writing stacks");

  int num_oops = _oops->length();
  int size = to_bytes(_wsp);

  allocate_stacks(size, num_oops, 0);
  if (thread()->has_pending_exception()) return;

  // the written data ends at _sp - METADATA_SIZE, so it starts _wsp cells below that
  address to = (address)stack_address(_sp - to_index(METADATA_SIZE) - _wsp);
  log_trace(jvmcont)("Copying %d bytes", size); 
  log_trace(jvmcont)("Copying to h: %p - %p (%d - %d)", to, to + size, to_index(stack(), to), to_index(stack(), to + size));

  Copy::conjoint_memory_atomic(_write_stack, to, size);

  // delete _write_stack;
  _write_stack = NULL;
  
  // load each recorded oop (narrow or full) from its stack slot and store it
  // into the ref stack below the current ref_sp
  log_trace(jvmcont)("Copying %d oops", num_oops);
  for (int i = 0; i < _oops->length(); i++) {
    oopLoc ol = _oops->at(i);
    oop obj = ol.narrow ? (oop)RootAccess<>::oop_load(reinterpret_cast<narrowOop*>(ol.loc))
                        :      RootAccess<>::oop_load(reinterpret_cast<oop*>(ol.loc));
    int index = _ref_sp - num_oops + i;
    log_trace(jvmcont)("i: %d -> index: %d narrow: %d", i, index, ol.narrow); print_oop((void*)ol.loc, obj);
    assert (oopDesc::is_oop_or_null(obj), "invalid oop");
    _ref_stack->obj_at_put(index, obj); // does a HeapAccess<IN_HEAP_ARRAY> write barrier
  }

  _ref_sp = _ref_sp - num_oops;
  assert (_ref_sp >= 0, "_ref_sp: %d", _ref_sp);
  // delete oops;
  _oops = NULL;  
}
 770 
 771 inline hframe ContMirror::new_hframe(intptr_t* hsp, intptr_t* hfp, address pc, CodeBlob* cb, bool is_interpreted) {
 772   assert (!is_interpreted || in_writestack(hsp) == in_writestack(hfp), "");
 773 
 774   bool write = in_writestack(hsp);
 775   int sp;
 776   long fp;
 777   if (write) {
 778     sp = write_stack_index(hsp);
 779     fp = is_interpreted ? write_stack_index(hfp) : (long)hfp;
 780   } else {
 781     sp = stack_index(hsp);
 782     fp = is_interpreted ? stack_index(hfp) : (long)hfp;
 783   }
 784   return hframe(sp, fp, pc, cb, is_interpreted, write, write ? 0 : _stack_length);
 785 }
 786 
 787 inline int ContMirror::fix_write_index_after_write(int index) {
 788   return _sp - to_index(METADATA_SIZE) - _wsp + index;
 789 }
 790 
 791 inline int ContMirror::fix_index_after_write(int index, int old_length, int new_length) {
 792   return new_length - (old_length - index);
 793 }
 794 
 795 inline hframe ContMirror::fix_hframe_afer_write(hframe& hf) {
 796   if (hf.write()) {
 797     return hframe(fix_write_index_after_write(hf.sp()), 
 798                   hf.is_interpreted_frame() ? fix_write_index_after_write(hf.fp()) : hf.fp(),
 799                   hf.pc(),
 800                   hf.cb(),
 801                   hf.is_interpreted_frame(),
 802                   false,
 803                   _stack_length);
 804   } else {
 805     return hframe(fix_index_after_write(hf.sp(), hf._length, _stack_length), 
 806                   hf.is_interpreted_frame() ? fix_index_after_write(hf.fp(), hf._length, _stack_length) : hf.fp(),
 807                   hf.pc(),
 808                   hf.cb(),
 809                   hf.is_interpreted_frame(),
 810                   false,
 811                   _stack_length);
 812   }
 813 }
 814 
// Prepares the thread-local, resource-allocated buffers that freezing writes
// into before write_stacks() flushes them to the Java-side arrays.
// size is the estimated number of bytes needed for the frozen frames.
void ContMirror::init_write_arrays(int size) {
  _oops = new GrowableArray<oopLoc>();

  _wstack_length = to_index(size + 8); // due to overlap of bottom interpreted frame with entry frame
  _write_stack = NEW_RESOURCE_ARRAY(int, _wstack_length);
  _wsp = 0; // write pointer starts at the beginning of the buffer
}
 822 
 823 address ContMirror::freeze_target() {
 824   assert (_write_stack != NULL, "");
 825   return (address) _write_stack;
 826 }
 827 
 828 inline void ContMirror::add_oop_location(oop* p) { 
 829   log_trace(jvmcont)("i: %d (oop)", _oops->length());
 830   assert ((((unsigned long)p) & HOB) == 0, "HOB on: %p", p);
 831   _oops->append((oopLoc){false, (unsigned long)p});
 832 }
 833 
 834 inline void ContMirror::add_oop_location(narrowOop* p) { 
 835   log_trace(jvmcont)("i: %d (narrow)", _oops->length());
 836   assert ((((unsigned long)p) & HOB) == 0, "HOB on: %p", p);
 837   _oops->append((oopLoc){true, (unsigned long)p});
 838 }
 839 
// Reads the i-th oop from the continuation's ref-stack. Valid indices are
// [_ref_sp, length) — the occupied portion of the array.
inline oop ContMirror::obj_at(int i) {
  assert (_ref_stack != NULL, "");
  assert (_ref_sp <= i && i < _ref_stack->length(), "i: %d _ref_sp: %d, length: %d", i, _ref_sp, _ref_stack->length());
  return _ref_stack->obj_at(i);
}
 845 
 846 int ContMirror::num_oops() {
 847   return _ref_stack == NULL ? 0 : _ref_stack->length() - _ref_sp;
 848 }
 849 
// Makes the register map resolve the saved link (rbp) to this mirror's _fp
// field rather than a stack slot.
void ContMirror::update_register_map(RegisterMap& map) {
  log_trace(jvmcont)("Setting RegisterMap saved link address to: %p", &_fp);
  frame::update_map_with_saved_link(&map, (intptr_t **)&_fp);
}
 854 
 855 bool ContMirror::is_map_at_top(RegisterMap& map) {
 856   return (map.location(rbp->as_VMReg()) == (address)&_fp);
 857 }
 858 
 859 void ContMirror::call_pinned(res_freeze res, frame& f) {
 860   write();
 861 
 862   HandleMark hm(_thread);
 863   Handle conth(_thread, _cont);
 864   JavaCallArguments args;
 865   args.push_oop(conth);
 866   args.push_int(res);
 867   JavaValue result(T_VOID);
 868   JavaCalls::call_virtual(&result, SystemDictionary::Continuation_klass(), vmSymbols::onPinned_name(), vmSymbols::continuationOnPinned_signature(), &args, _thread);
 869   _cont = conth();  // reload oop after java call
 870   log_trace(jvmcont)("YTYTYTYTYTYT");
 871 }
 872 
 873 // static inline bool is_empty(frame& f) {
 874 //   return f.pc() == NULL;
 875 // }
 876 
 877 static inline Method* frame_method(const frame& f) {
 878   Method* m = NULL;
 879   if (f.is_interpreted_frame())
 880     m = f.interpreter_frame_method();
 881   else if (f.is_compiled_frame())
 882     m = ((CompiledMethod*)f.cb())->method();
 883   return m;
 884 }
 885 
 886 #ifdef ASSERT
 887 static char* frame_name(frame& f) {
 888   Method* m = frame_method(f);
 889   return m != NULL ? m->name_and_sig_as_C_string() : NULL;
 890 }
 891 #endif
 892 
// works only in thaw
// True if f is the continuation's entry frame: its sp equals the entry sp
// recorded in the mirror.
static inline bool is_entry_frame(ContMirror& cont, frame& f) {
  return f.sp() == cont.entrySP();
}
 897 
 898 static inline bool is_deopt_return(address pc, frame& sender) {
 899   if (sender.is_interpreted_frame()) return false;
 900 
 901   CompiledMethod* cm = sender.cb()->as_compiled_method();
 902   return cm->is_deopt_pc(pc);
 903 }
 904 
// Address of the stack slot holding this frame's saved link (caller's rbp).
static inline intptr_t** real_link_address(frame& f, bool is_interpreted) {
  return is_interpreted
            ? (intptr_t**)(f.fp() + frame::link_offset)
            : (intptr_t**)(f.real_fp() - frame::sender_sp_offset); // x86-specific
}
 910 
 911 // static inline intptr_t* real_link(frame& f, bool is_interpreted) { 
 912 //   return *real_link_address(f, is_interpreted);
 913 // }
 914 
 915 static void patch_link(frame& f, intptr_t* fp, bool is_interpreted) {
 916   *real_link_address(f, is_interpreted) = fp;
 917   log_trace(jvmcont)("patched link: %p", fp);
 918 }
 919 
 920 static void patch_sender_sp(frame& f, intptr_t* sp) {
 921   assert (f.is_interpreted_frame(), "");
 922   *(intptr_t**)(f.fp() + frame::interpreter_frame_sender_sp_offset) = sp;
 923   log_trace(jvmcont)("patched sender_sp: %p", sp);
 924 }
 925 
// Address of the stack slot holding this frame's return pc.
static inline address* return_pc_address(const frame& f, bool is_interpreted) {
  return is_interpreted
            ? (address*)(f.fp() + frame::return_addr_offset)
            : (address*)(f.real_fp() - 1); // x86-specific
}
 931 
 932 static inline address return_pc(const frame& f, bool is_interpreted) {
 933   return *return_pc_address(f, is_interpreted);
 934 }
 935 
 936 static void patch_return_pc(frame& f, address pc, bool is_interpreted) {
 937   *return_pc_address(f, is_interpreted) = pc;
 938   log_trace(jvmcont)("patched return_pc: %p", pc);
 939 }
 940 
 941 // static void patch_interpreted_bci(frame& f, int bci) {
 942 //   f.interpreter_frame_set_bcp(f.interpreter_frame_method()->bcp_from(bci));
 943 // }
 944 
// Current expression-stack depth (in slots) of an interpreted frame at its
// bci, derived from the interpreter oop map.
static inline int interpreter_frame_expression_stack_size(frame &f) {
  Method* m = f.interpreter_frame_method();
  int   bci = f.interpreter_frame_bci();
  InterpreterOopMap mask;
  OopMapCache::compute_one_oop_map(m, bci, &mask); // TODO: reuse in freeze_oops ?
  return mask.expression_stack_size();
}
 952 
// Top of the live portion of an interpreted frame: initial_sp minus the
// current expression-stack depth.
static inline intptr_t* interpreted_frame_top(frame& f) { // inclusive; this will be copied with the frame
  return *(intptr_t**)f.addr_at(frame::interpreter_frame_initial_sp_offset) - interpreter_frame_expression_stack_size(f);
}
 956 
// Top of a compiled frame is simply its unextended sp.
static inline intptr_t* compiled_frame_top(frame& f) { // inclusive; this will be copied with the frame
  return f.unextended_sp();
}
 960 
 961 static inline intptr_t* frame_top(frame &f) { // inclusive; this will be copied with the frame
 962   return f.is_interpreted_frame() ? interpreted_frame_top(f) : compiled_frame_top(f);
 963 }
 964 
// Bottom (exclusive) of an interpreted frame: one word past its locals.
// Debug builds verify the result does not overlap the sender's frame top.
static inline intptr_t* interpreted_frame_bottom(frame& f) { // exclusive; this will not be copied with the frame
#ifdef ASSERT
    RegisterMap map(JavaThread::current(), false); // if thread is NULL we don't get a fix for the return barrier -> entry frame
    frame sender = f.sender(&map);
    intptr_t* locals_plus_one = *(intptr_t**)f.addr_at(frame::interpreter_frame_locals_offset) + 1;
    if (frame_top(sender) != locals_plus_one) {
      log_trace(jvmcont)("f: "); print_vframe(f);
      log_trace(jvmcont)("sender: "); print_vframe(sender);
    }
    assert (frame_top(sender) >= locals_plus_one, "sender top: %p locals+1: %p", frame_top(sender), locals_plus_one);
#endif
    return *(intptr_t**)f.addr_at(frame::interpreter_frame_locals_offset) + 1; // exclusive, so we add 1 word
}
 978 
// Bottom (exclusive) of a compiled frame: unextended sp plus the code blob's
// fixed frame size (in words).
static inline intptr_t* compiled_frame_bottom(frame& f) { // exclusive; this will not be copied with the frame
  return f.unextended_sp() + f.cb()->frame_size();
}
 982 
 983 static inline int compiled_frame_num_parameters(frame& f) {
 984   assert (f.is_compiled_frame(), "");
 985   if (Interpreter::contains(return_pc(f, false))) {
 986     return ((CompiledMethod*)f.cb())->method()->size_of_parameters();
 987   }
 988   return 0;
 989 }
 990 
 991 static inline intptr_t* frame_bottom(frame &f) { // exclusive this will be copied with the frame
 992   return f.is_interpreted_frame() ? interpreted_frame_bottom(f) : compiled_frame_bottom(f);
 993 }
 994 
// An interpreted frame owns monitors iff its monitor block is non-empty
// (the block grows down, so end < begin means at least one BasicObjectLock).
static bool is_interpreted_frame_owning_locks(frame& f) {
  return f.interpreter_frame_monitor_end() < f.interpreter_frame_monitor_begin();
}
 998 
// Determines whether a compiled frame (including its inlined scopes) holds
// any monitors, which would pin the continuation.
static bool is_compiled_frame_owning_locks(JavaThread* thread, RegisterMap* map, frame& f) {
  ResourceMark rm(thread); // vframes/scopes are allocated in the resource area

  nmethod* nm = f.cb()->as_nmethod();
  assert (!nm->is_compiled() || !nm->as_compiled_method()->is_native_method(), ""); // ??? See compiledVFrame::compiledVFrame(...) in vframe_hp.cpp

  // Walk all scopes (innermost to outermost) described at this pc.
  for (ScopeDesc* scope = nm->scope_desc_at(f.pc()); scope != NULL; scope = scope->sender()) {
    // scope->print_on(tty);
    GrowableArray<MonitorValue*>* mons = scope->monitors();
    if (mons == NULL || mons->is_empty())
      continue;
    // NOTE(review): this early return makes the precise owner check below
    // unreachable — any scope with monitor slots (even all-eliminated ones)
    // is conservatively treated as owning locks. Confirm whether this is a
    // deliberate conservative shortcut or a debugging leftover.
    return true;
    for (int index = (mons->length()-1); index >= 0; index--) { // see compiledVFrame::monitors()
      MonitorValue* mon = mons->at(index);
      if (mon->eliminated())
        continue; // monitor was eliminated by the compiler; not actually held
      ScopeValue* ov = mon->owner();
      StackValue* owner_sv = StackValue::create_stack_value(&f, map, ov); // it is an oop
      oop owner = owner_sv->get_obj()();
      if (owner != NULL)
        return true;
    }
  }
  return false;
}
1024 
// Converts the absolute pointer stored at hfp[offset] (a copy of the slot at
// fp[offset]) into an index relative to fp, stored back into the h-frame.
static inline void relativize(intptr_t* const fp, intptr_t* const hfp, int offset) {
    *(long*)(hfp + offset) = to_index((address)*(hfp + offset) - (address)fp);
}
1028 
// Inverse of relativize: converts the relative index stored at fp[offset]
// back into an absolute pointer based at fp.
static inline void derelativize(intptr_t* const fp, int offset) {
    *(fp + offset) = (intptr_t)((address)fp + to_bytes(*(long*)(fp + offset)));
}
1032 
1033 class ContOopClosure : public OopClosure, public DerivedOopClosure {
1034 protected:
1035   ContMirror* const _cont;
1036   void* const _vsp;
1037   frame* _fr;
1038   int _count;
1039 #ifdef ASSERT
1040   RegisterMap* _map;
1041 #endif
1042 
1043 public:
1044   int count() { return _count; }
1045 
1046 protected:
1047   ContOopClosure(ContMirror* cont, frame* fr, RegisterMap* map, void* vsp)
1048    : _cont(cont), _fr(fr), _vsp(vsp) { 
1049      _count = 0;
1050   #ifdef ASSERT
1051     _map = map;
1052   #endif
1053   }
1054 
1055   inline int verify(void* p) {
1056     int offset = (address)p - (address)_vsp; // in thaw_oops we set the saved link to a local, so if offset is negative, it can be big
1057 
1058 #ifdef ASSERT // this section adds substantial overhead
1059     VMReg reg;
1060     assert(offset >= 0 || p == _fr->saved_link_address(_map),  
1061       "offset: %d reg: %s", offset, (reg = find_register_spilled_here(p, _map), reg != NULL ? reg->name() : "NONE")); // calle-saved register can only be rbp
1062     reg = find_register_spilled_here(p, _map); // expensive operation
1063     if (reg != NULL) log_trace(jvmcont)("reg: %s", reg->name());
1064     log_trace(jvmcont)("p: %p offset: %d %s", p, offset, p == _fr->saved_link_address(_map) ? "(link)" : "");
1065 #else
1066     log_trace(jvmcont)("p: %p offset: %d", p, offset);
1067 #endif
1068 
1069     return offset;
1070   }
1071 
1072   inline bool process(void* p) {
1073     verify(p);
1074     _count++;
1075     return true;
1076   }
1077 };
1078 
// Closure applied to a frame during freezing: records each oop location in
// the mirror's oop list (the oops themselves are copied to the ref-stack
// later, in write_stacks), poisons the corresponding h-stack slots in
// non-product builds, and rewrites derived pointers in the h-frame copy as
// offsets from their base oop.
class FreezeOopClosure: public ContOopClosure {
 private:
  void* const _hsp;                 // this frame's copy on the h-stack (write buffer)
  intptr_t** _h_saved_link_address; // h-stack location of the callee's saved-link slot, or NULL

 protected:
  template <class T> inline void do_oop_work(T* p) {
    if (!process(p)) return;

  #ifdef ASSERT
    oop obj = RootAccess<>::oop_load(p);
    print_oop(p, obj);
    assert (oopDesc::is_oop_or_null(obj), "invalid oop");
  #endif
    // Only the location is recorded here; write_stacks() moves the oop.
    _cont->add_oop_location(p);

  #ifndef PRODUCT
    int offset = verify(p);
    if (offset >= 0) { // rbp could be stored in the callee frame. because frames are stored differently on the h-stack, we don't mark if outside frame
      address hloc = (address)_hsp + offset;
      log_trace(jvmcont)("Marking oop at %p (offset: %d)", hloc, offset);
      assert (_cont->in_writestack(hloc), "");
      memset(hloc, 0xba, sizeof(T)); // mark oop location
    } else {
      // Negative offset: this is the saved link (rbp), whose h-stack copy
      // lives in the callee's frame.
      assert (p == (T*)_fr->saved_link_address(_map), "");
      address hloc = (address) _h_saved_link_address;
      if (hloc != NULL) { // could be NULL for rbp of the yield frame
        log_trace(jvmcont)("Marking oop at %p (offset: %d)", hloc, offset);
        memset(hloc, 0xba, sizeof(intptr_t*)); // mark oop locations
      }
    }
  #endif
  }
 public:
  FreezeOopClosure(ContMirror* cont, frame* fr, void* vsp, void* hsp, intptr_t** h_saved_link_address, RegisterMap* map)
   : ContOopClosure(cont, fr, map, vsp), _hsp(hsp), _h_saved_link_address(h_saved_link_address) { assert (cont->in_stack(hsp), ""); }
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }

  // Stores the derived pointer's distance from its base oop into the h-stack
  // slot; ThawOopClosure::do_derived_oop re-adds the (possibly relocated)
  // base to reconstruct the pointer.
  virtual void do_derived_oop(oop *base_loc, oop *derived_loc) { 
    assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
    assert(derived_loc != base_loc, "Base and derived in same location");
    verify(base_loc);
    verify(derived_loc);

    intptr_t offset = cast_from_oop<intptr_t>(*derived_loc) - cast_from_oop<intptr_t>(*base_loc);

    log_trace(jvmcont)(
      "Continuation freeze derived pointer@" INTPTR_FORMAT " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset);

    int hloc_offset = (address)derived_loc - (address)_vsp;
    intptr_t* hloc;
    if (hloc_offset >= 0) {
      hloc = (intptr_t*)((address)_hsp + hloc_offset);
    } else {
      // Saved-link slot: its copy lives in the callee's h-frame.
      assert ((intptr_t**)derived_loc == _fr->saved_link_address(_map), "");
      hloc = (intptr_t*) _h_saved_link_address;
    }
    log_trace(jvmcont)("Writing derived pointer offset at %p (offset: %ld, 0x%lx)", hloc, offset, offset);
    if (hloc != NULL)
      *hloc = offset;
  }
};
1143 
// Closure applied to a frame during thawing: re-installs oops from the
// continuation's ref-stack into the live frame, and reconstructs derived
// pointers from the offsets stored at freeze time.
class ThawOopClosure: public ContOopClosure {
 private:
  int _i; // next ref-stack index to read from

 protected:
  template <class T> inline void do_oop_work(T* p) {
    if (!process(p)) return;

    oop obj = _cont->obj_at(_i); // does a HeapAccess<IN_HEAP_ARRAY> load barrier
    log_trace(jvmcont)("i: %d", _i); print_oop(p, obj);
    RootAccess<>::oop_store(p, obj);
    _i++;
  }
 public:
  ThawOopClosure(ContMirror* cont, frame* fr, int index, int num_oops, void* vsp, RegisterMap* map) 
    : ContOopClosure(cont, fr, map, vsp) { _i = index; }
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }

  // The slot currently holds the offset written by
  // FreezeOopClosure::do_derived_oop; add it to the already-thawed base oop
  // to rebuild the derived pointer.
  virtual void do_derived_oop(oop *base_loc, oop *derived_loc) { 
    assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop: %p (at %p)", (oopDesc*)*base_loc, base_loc);
    verify(derived_loc);
    verify(base_loc);

    intptr_t offset = *(intptr_t*)derived_loc;

    *derived_loc = cast_to_oop(cast_from_oop<intptr_t>(*base_loc) + offset);

    log_trace(jvmcont)(
      "Continuation thaw derived pointer@" INTPTR_FORMAT " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset);

    assert(derived_loc != base_loc, "Base and derived in same location");
  }
};
1179 
1180 #ifndef PRODUCT
// Debug helper: sets the thread's last-Java-frame anchor to the frame
// described by fi so that stack walking (e.g. print_vframe) works from it.
static void set_anchor(JavaThread* thread, FrameInfo* fi) {
  JavaFrameAnchor* anchor = thread->frame_anchor();
  anchor->set_last_Java_sp((intptr_t*)fi->sp);
  anchor->set_last_Java_fp((intptr_t*)fi->fp);
  anchor->set_last_Java_pc(fi->pc);

  assert(thread->last_frame().cb() != NULL, "");

  log_trace(jvmcont)("set_anchor:");
  print_vframe(thread->last_frame());
}
1192 
// Debug helper: sets the anchor to the continuation's entry frame.
static void set_anchor(ContMirror& cont) {
  FrameInfo fi = { cont.entryPC(), cont.entryFP(), cont.entrySP() };
  set_anchor(cont.thread(), &fi);
}
1197 
// Debug helper: clears the thread's last-Java-frame anchor.
static inline void clear_anchor(JavaThread* thread) {
  thread->frame_anchor()->clear();
}
1201 #endif
1202 
1203 static int count_frames(frame f, intptr_t* bottom) {
1204   RegisterMap map(NULL, false);
1205   int i = 0;
1206   log_trace(jvmcont)("count_frames bottom: %p", bottom); 
1207   while (f.unextended_sp() < bottom) {
1208     i++;
1209     f = f.sender(&map);
1210   }
1211   log_trace(jvmcont)("count_frames #frames: %d", i); 
1212   return i;
1213 }
1214 
// Runs FreezeOopClosure over frame f, recording the frame's oop locations in
// the mirror; returns the number of oops found. callee is f's callee h-frame
// (possibly empty for the top frame), used to locate the h-stack copy of the
// saved-link slot.
static inline int freeze_oops(ContMirror& cont, frame &f, hframe& callee, void* vsp, void* hsp, RegisterMap& map) {
  log_trace(jvmcont)("Walking oops (freeze)");

  assert (!map.include_argument_oops(), "");
  
  FreezeOopClosure oopClosure(&cont, &f, vsp, hsp, !callee.is_empty() ? (intptr_t**)callee.link_address(cont) : NULL, &map);
  f.oops_do(&oopClosure, NULL, &oopClosure, &map);

  log_trace(jvmcont)("Done walking oops");
  
  return oopClosure.count();
}
1227 
// Copies one interpreted frame (expression-stack top through locals) onto the
// h-stack at target, links the callee's h-frame to it, and converts the
// frame's internal pointers to relative indices.
// Returns the number of h-stack bytes consumed (frame plus metadata).
static inline size_t freeze_interpreted_frame(ContMirror& cont, frame& f, hframe& hf, address target) {
  intptr_t* vsp = interpreted_frame_top(f);
  intptr_t* bottom = interpreted_frame_bottom(f);
  // if (bottom > cont.entrySP()) bottom = cont.entrySP(); // due to a difference between initial_sp and unextended_sp; need to understand better
  // assert (bottom <= cont.entrySP(), "bottom: %p entrySP: %p", bottom, cont.entrySP());
  assert (bottom > vsp, "bottom: %p vsp: %p", bottom, vsp);
  assert (f.unextended_sp() <= vsp, "frame top: %p unextended_sp: %p", vsp, f.unextended_sp());
  const int fsize = (bottom - vsp) * sizeof(intptr_t);

  intptr_t* hsp = (intptr_t*)(target + METADATA_SIZE);
  intptr_t* vfp = f.fp();
  intptr_t* hfp = hsp + (vfp - vsp); // fp's position within the h-stack copy

  assert (*(intptr_t**)(vfp + frame::interpreter_frame_locals_offset) < bottom, "frame bottom: %p locals: %p", 
    bottom, *(intptr_t**)(vfp + frame::interpreter_frame_locals_offset));

  // hf is the callee
  if (!hf.is_empty()) {
    // Point the callee's saved link (and, for an interpreted callee that
    // hasn't been fixed yet, its sender-sp) at this frame's h-stack position.
    hf.patch_link_relative(cont, hfp);
    if (hf.is_interpreted_frame() && hf.get_real_fp_offset(cont, frame::interpreter_frame_sender_sp_offset) == 0) {
      hf.patch_real_fp_offset_relative(cont, frame::interpreter_frame_sender_sp_offset, hsp);
    }
  }

  cont.copy_to_stack(vsp, hsp, fsize);

  hf = cont.new_hframe(hsp, hfp, f.pc(), NULL, true);

  // TODO: for compression, initial_sp seems to always point to itself, and locals points to the previous frame's initial_sp + 1 word
  if (*(intptr_t**)(vfp + frame::interpreter_frame_locals_offset) < bottom)
    relativize(vfp, hfp, frame::interpreter_frame_sender_sp_offset);
  else
    hf.patch_real_fp_offset(cont, frame::interpreter_frame_sender_sp_offset, 0);
  relativize(vfp, hfp, frame::interpreter_frame_last_sp_offset);
  relativize(vfp, hfp, frame::interpreter_frame_initial_sp_offset); // == block_top == block_bottom
  relativize(vfp, hfp, frame::interpreter_frame_locals_offset);

  // NOTE(review): the sender-sp slot is unconditionally zeroed just below,
  // which appears to overwrite the value relativized above — confirm which
  // of the two writes is intended to win.
  hf.patch_link(cont, 0); // link is filled in when this frame's sender is frozen
  hf.patch_real_fp_offset(cont, frame::interpreter_frame_sender_sp_offset, 0);

  hf.set_size(cont, fsize);
  hf.set_uncompressed_size(cont, fsize);
  hf.set_num_oops(cont, 0); // updated by freeze_frame after freeze_oops runs

  cont.add_size(fsize);
  cont.inc_num_interpreted_frames();
  cont.inc_num_frames();
  
  return fsize + METADATA_SIZE;
}
1278 
// Copies one compiled frame onto the h-stack at target; like
// freeze_interpreted_frame, except compiled frames keep an absolute fp and
// need no pointer relativization.
// Returns the number of h-stack bytes consumed (frame plus metadata).
static inline size_t freeze_compiled_frame(ContMirror& cont, frame& f, hframe& hf, address target) {
  intptr_t* vsp = compiled_frame_top(f);
  intptr_t* bottom = compiled_frame_bottom(f);
  // Disabled experiment (if (false ...)): skip the entry frame's parameter area.
  if (false &&  bottom + 2 < cont.entrySP()) { // we don't freeze parameters from entry. this hack tells us if sender is entry.
    bottom += compiled_frame_num_parameters(f);
  }
  assert (bottom > vsp, "bottom: %p vsp: %p", bottom, vsp); 
  assert (bottom <= cont.entrySP(), "bottom: %p entrySP: %p", bottom, cont.entrySP());
  
  const int fsize = (bottom - vsp) * sizeof(intptr_t);
  
  intptr_t* hsp = (intptr_t*)(target + METADATA_SIZE);

  // hf is the callee
  if (!hf.is_empty()) {
    hf.patch_link(cont, (long)f.fp()); // compiled frames store an absolute fp
    if (hf.is_interpreted_frame()) {
      assert (hf.get_real_fp_offset(cont, frame::interpreter_frame_sender_sp_offset) == 0, "");
      hf.patch_real_fp_offset_relative(cont, frame::interpreter_frame_sender_sp_offset, hsp);
    }
  }

  cont.copy_to_stack(vsp, hsp, fsize);

  hf = cont.new_hframe(hsp, f.fp(), f.pc(), f.cb(), false);
  hf.patch_link(cont, 0); // link is filled in when this frame's sender is frozen

  hf.set_size(cont, fsize);
  hf.set_uncompressed_size(cont, 0); // 0 here, vs. fsize for interpreted frames — presumably distinguishes the two; confirm
  hf.set_num_oops(cont, 0); // updated by freeze_frame after freeze_oops runs

  cont.inc_num_frames();
  cont.add_size(fsize);
  
  return fsize + METADATA_SIZE;
}
1315 
// freezes a single frame
// Dispatches on the frame type, copies it to the h-stack, records its oops,
// then advances f to its sender and target past the copy.
// Returns freeze_ok, or a pinned-reason code on failure (f is then unchanged).
static res_freeze freeze_frame(ContMirror& cont, address &target, frame &f, RegisterMap &map, hframe &hf, bool is_top) {
  log_trace(jvmcont)("=============================");

  RegisterMap dmap(NULL, false);
  print_vframe(f, &dmap);
  assert (strcmp(frame_name(f), ENTER_SIG) != 0, "name: %s", frame_name(f));

  const bool is_interpreted = f.is_interpreted_frame();
  const bool is_compiled = f.is_compiled_frame();

  assert (!is_interpreted || f.is_interpreted_frame_valid(cont.thread()), "invalid frame");

  // A frame holding monitors pins the continuation.
  if ((is_interpreted && is_interpreted_frame_owning_locks(f)) 
        || (is_compiled && is_compiled_frame_owning_locks(cont.thread(), &map, f))) {
      return freeze_pinned_monitor;
  }

  hframe callee = hf; // remember the callee h-frame for freeze_oops below

  size_t nbytes = 0;
  if      (is_compiled)    nbytes = freeze_compiled_frame(cont, f, hf, target);
  else if (is_interpreted) nbytes = freeze_interpreted_frame(cont, f, hf, target);
  else {
    // Neither interpreted nor compiled: a native/stub frame pins us.
    // TODO: support reflection, doPrivileged
    log_trace(jvmcont)("not Java: %p", f.pc());
    if (log_is_enabled(Trace, jvmcont)) os::print_location(tty, *((intptr_t*)((void*)f.pc())));
    return freeze_pinned_native;
  }

  // If this frame returns into an already-frozen continuation segment, point
  // its frozen return pc at the continuation's recorded pc.
  if (Continuation::is_cont_bottom_frame(f)) { // XXXXXXXXX
    assert (!cont.is_empty(), "");
    log_trace(jvmcont)("Fixing return address on bottom frame: %p", cont.pc());
    hf.patch_return_pc(cont, cont.pc());
  }

  if (!hf.is_interpreted_frame() && Interpreter::contains(hf.return_pc(cont))) { // do after fixing return_pc
      cont.add_size(sizeof(intptr_t)); // possible alignment
      // cont.add_size(sizeof(intptr_t) * ((CompiledMethod*)hf.cb())->method()->size_of_parameters()); // see thaw_compiled_frame
  }

  // Record the frame's oops now that its h-stack copy exists.
  if (nbytes > 0) {
    intptr_t* vsp = frame_top(f);
    intptr_t* hsp = (intptr_t*)(target + METADATA_SIZE);
    int num_oops = freeze_oops(cont, f, callee, vsp, hsp, map);
    hf.set_num_oops(cont, num_oops);
  }

  frame sender = f.sender(&map);

  // last condition is after fixing bottom-most frozen frame
  assert ((hf.return_pc(cont) != sender.pc()) <= (sender.is_deoptimized_frame() || hf.return_pc(cont) == cont.pc()), "hf.return_pc: %p sender.pc: %p sender.is_deoptimized_frame: %d", hf.return_pc(cont), sender.pc(), sender.is_deoptimized_frame());
  if (false) { // TODO: try this instead of deoptimize parameter in thaw
    if (sender.is_deoptimized_frame() && hf.return_pc(cont) != cont.pc()) {
      log_trace(jvmcont)("re-patching deopt"); // we will deopt again when thawing
      hf.patch_return_pc(cont, sender.pc());
    }
  }

  log_trace(jvmcont)("hframe:");
  if (log_is_enabled(Trace, jvmcont)) hf.print(cont);

  target += nbytes; // advance the write position past the frame just copied
  f = sender;       // continue with the caller
  
  return freeze_ok;
}
1383 
// freezes all frames of a single continuation
// Walks from the yield frame f down to (but not including) the continuation's
// entry frame, freezing each frame into the write buffer, then flushes the
// buffer and stitches the new h-frames onto any previously frozen ones.
// On success, f is left at the entry frame's caller and the mirror is written
// back; returns false on pinning or allocation failure.
static bool freeze_continuation(JavaThread* thread, ContMirror& cont, frame& f, RegisterMap& map) {
  HandleMark hm(thread); // TODO: necessary?

  LogStreamHandle(Trace, jvmcont) st;

  DEBUG_ONLY(log_trace(jvmcont)("Freeze ### #%lx", cont.hash()));
  log_trace(jvmcont)("Freeze 0000 sp: %p fp: %p pc: %p", f.sp(), f.fp(), f.pc());
  log_trace(jvmcont)("Freeze 1111 sp: %d fp: 0x%lx pc: %p", cont.sp(), cont.fp(), cont.pc());

  intptr_t* bottom = cont.entrySP(); // (bottom is highest address; stacks grow down)
  intptr_t* top = f.sp();

  log_trace(jvmcont)("QQQ AAAAA bottom: %p top: %p size: %ld", bottom, top, (address)bottom - (address)top);

  // Size estimate for the write buffer: the raw stack span plus per-frame
  // metadata.
  int size = (bottom - top) * sizeof(intptr_t); // in bytes
  int num_frames = count_frames(f, bottom);
  size += num_frames * METADATA_SIZE;

  log_trace(jvmcont)("bottom: %p size: %d, count %d", bottom, size, num_frames);
  assert (num_frames < 1000 && num_frames > 0 && size > 0, "num_frames: %d size: %d", num_frames, size); // just sanity; sometimes get garbage

  ResourceMark rm(thread); // required for the arrays created in ContMirror::init_arrays(int)
  
  hframe orig_top_frame = cont.last_frame();
  log_trace(jvmcont)("top_hframe before (freeze):");
  if (log_is_enabled(Trace, jvmcont)) orig_top_frame.print_on(cont, tty);

  cont.init_write_arrays(size);
  
  const bool empty = cont.is_empty();
  log_trace(jvmcont)("empty: %d", empty);
  assert (!CONT_FULL_STACK || empty, "");
  assert (!empty || cont.sp() > cont.stack_length() || cont.sp() < 0, "sp: %d stack_length: %d", cont.sp(), cont.stack_length());
  assert (orig_top_frame.is_empty() == empty, "empty: %d f.sp: %d f.fp: 0x%lx f.pc: %p", empty, orig_top_frame.sp(), orig_top_frame.fp(), orig_top_frame.pc());

  address target = cont.freeze_target();

  // Freeze the physical frames one at a time into the write buffer.
  hframe hf;
  hframe new_top;
  int nframes = 0;
  DEBUG_ONLY(frame last_frozen;)
  while (f.real_fp() <= bottom) { // sp/unextended_sp aren't accurate enough TODO -- reconsider
    DEBUG_ONLY(last_frozen = f;)
    res_freeze res = freeze_frame(cont, target, f, map, hf, nframes == 0); // changes f, target,hf
    if (res != freeze_ok) { // f hasn't changed
        log_trace(jvmcont)("FREEZE FAILED %d", res);
        cont.call_pinned(res, f);
        return false;
    }
    if (nframes == 0)
      new_top = hf;
    nframes++;
  }
  if (log_is_enabled(Trace, jvmcont)) { log_trace(jvmcont)("Found entry frame: "); print_vframe(f); }
  assert (strcmp(frame_name(f), ENTER_SIG) == 0, "name: %s", frame_name(f));

  assert (!empty == Continuation::is_cont_bottom_frame(last_frozen), 
    "empty: %d is_cont_bottom_frame(last_frozen): %d", empty, Continuation::is_cont_bottom_frame(last_frozen));

  // Flush the write buffers to the Java-side arrays; this may allocate and fail.
  cont.write_stacks();
  if (thread->has_pending_exception()) return false;

  // Indices recorded during freezing were relative to the write buffer / old
  // stack; rebase them onto the final h-stack.
  hf      = cont.fix_hframe_afer_write(hf); 
  new_top = cont.fix_hframe_afer_write(new_top);
  orig_top_frame = cont.fix_hframe_afer_write(orig_top_frame);

  cont.set_last_frame(new_top); // must be done after loop, because we rely on the old top when patching last-copied frame
  
  // f now points at the entry frame

  assert (hf.is_interpreted_frame() || hf.size(cont) % 16 == 0, "");

  // Patch the bottom-most frozen frame: no h-frame sender if the continuation
  // was empty, otherwise link it to the previous top h-frame.
  if (empty) {
    if (f.is_interpreted_frame()) {
      hf.patch_link(cont, 0);
    } else {
      if (f.is_deoptimized_frame()) {
        assert (f.cb()->as_nmethod()->get_original_pc(&f) == f.pc(), "original_pc: %p f.pc(): %p", f.cb()->as_nmethod()->get_original_pc(&f), f.pc());
        assert (is_deopt_return(hf.return_pc(cont), f), "must be");
        assert (hf.return_pc(cont) != f.pc(), "hf.return_pc(): %p f.pc(): %p", hf.return_pc(cont), f.pc());
        log_trace(jvmcont)("Entry frame deoptimized! pc: %p -> original_pc: %p", hf.return_pc(cont), f.pc());
      } else // we do not patch if entry is deopt, as we use that information when thawing
        hf.patch_return_pc(cont, NULL);
    }
    assert (hf.sender(cont).is_empty(), "");
  } else {
    hf.patch_callee(cont, orig_top_frame);

    assert (hf.sender(cont) == orig_top_frame, "");
  }

  log_trace(jvmcont)("last h-frame:");
  if (log_is_enabled(Trace, jvmcont)) hf.print(cont);

  log_trace(jvmcont)("top_hframe after (freeze):");
  if (log_is_enabled(Trace, jvmcont)) cont.last_frame().print_on(cont, tty);

  DEBUG_ONLY(address ret_pc =  return_pc(f, f.is_interpreted_frame());)

  RegisterMap dmap(NULL, false);
  f = f.sender(&dmap); // go one frame further, to the entry frame's caller
  
  // assert (f.pc() == ret_pc, "f.pc: %p return_pc: %p", f.pc(), ret_pc); // probably wrong when Continuation.run is deoptimized
  assert (strcmp(frame_name(f), RUN_SIG) == 0, "name: %s", frame_name(f));

  cont.write(); // commit the updated mirror fields to the heap object

  log_trace(jvmcont)("--- end of freeze_continuation");

  return true;
}
1496 
// recursively call freeze for all continuations up the chain until appropriate scope
// Returns the outermost frozen continuation oop, or NULL on failure — in
// which case this continuation's sp/fp/pc/refSP are restored to their
// pre-freeze values. (Recursion into parents is currently disabled: scopes
// is forced to 0 below.)
static oop freeze_continuations(JavaThread* thread, oop contOop, int scopes, frame& f, RegisterMap &map) {
  scopes = 0; // TODO: TEMPORARY
  assert (scopes == 0, "scopes: %d", scopes); // TODO: TEMPORARY

  // save old values to restore in case of freeze failure
  assert (contOop != NULL, "");
  assert (scopes >= 0, "scopes: %d", scopes);

  log_trace(jvmcont)("Freeze ___ cont: %p scopes: %d", (oopDesc*)contOop, scopes);

  ContMirror cont(thread, contOop);
  cont.read();

  const int     orig_sp = cont.sp();
  const long    orig_fp = cont.fp();
  const address orig_pc = cont.pc();
  const int     orig_ref_sp = cont.refSP();

  if (freeze_continuation(thread, cont, f, map)) {
    if (scopes == 0) {
      return contOop;
    } else {
      // Freeze the parent continuation too, one scope fewer remaining.
      oop parent = java_lang_Continuation::parent(contOop);
      if (parent != NULL) {
        oop ret = freeze_continuations(thread, parent, scopes - 1, f, map);
        if (ret != NULL)
          return ret;
      } else {
        assert (false, "scopes: %d", scopes); // TODO: throw exception
      }
    }
  }
    
  // fail; reset cont
  log_trace(jvmcont)("FREEZE FAILED resetting");
  cont.set_sp(orig_sp);
  cont.set_fp(orig_fp);
  cont.set_pc(orig_pc);
  cont.set_refSP(orig_ref_sp);
  cont.write();

  return NULL; // propagates failure up the recursive call-chain.
}
1541 
1542 // returns the continuation yielding (based on context), or NULL for failure (due to pinning)
// it freezes multiple continuations, depending on context
1544 // it must set Continuation.stackSize
1545 // sets Continuation.fp/sp to relative indices
1546 //
1547 // In: fi->pc, fi->sp, fi->fp all point to the current (topmost) frame to freeze (the yield frame)
1548 // Out: fi->pc, fi->sp, fi->fp all point to the entry frame
1549 //      unless freezing has failed, in which case fi->pc = 0
1550 // 
JRT_ENTRY(void, Continuation::freeze(JavaThread* thread, FrameInfo* fi, int scopes))
  log_trace(jvmcont)("~~~~~~~~~ freeze scopes: %d", scopes);
  log_trace(jvmcont)("fi->sp: %p fi->fp: %p fi->pc: %p", fi->sp, fi->fp, fi->pc);
  // set_anchor(thread, fi); // DEBUG
  print_frames(thread);

  DEBUG_ONLY(thread->_continuation = NULL;)

  HandleMark hm(thread);

  // call here, when we know we can handle safepoints, to initialize Continuation::_entry_method
  // potentially racy, but benign
  Continuation::entry_method(thread);
  if (thread->has_pending_exception()) {
    // failure is reported to the caller by zeroing all three fields (fi->pc == NULL)
    fi->fp = NULL; fi->sp = NULL; fi->pc = NULL;
    log_trace(jvmcont)("=== end of freeze (fail 0)");
    return;
  }

  oop cont = get_continuation(thread);
  assert(cont != NULL && oopDesc::is_oop_or_null(cont), "Invalid cont: %p", (void*)cont);

  RegisterMap map(thread, true);
  map.set_include_argument_oops(false);
  frame f = thread->last_frame(); // this is the doYield stub frame. last_frame is set up by the call_VM infrastructure
  f = f.sender(&map); // this is the yield frame
  assert (f.pc() == fi->pc, "");
  // The following doesn't work because fi->fp can contain an oop, that a GC doesn't know about when walking.
  // frame::update_map_with_saved_link(&map, (intptr_t **)&fi->fp);
  // frame f(fi->sp, fi->fp, fi->pc); // the yield frame

  cont = freeze_continuations(thread, cont, scopes, f, map); // changes f
  if (cont == NULL) {
    // freeze failed (e.g. pinned); signal failure the same way as above
    fi->fp = NULL; fi->sp = NULL; fi->pc = NULL;
    log_trace(jvmcont)("=== end of freeze (fail)");
    return;
  }
  
  if (false) // TODO BUG: check what happens in Continuation.run. Does it overwrite the current cont? consider logic of nested conts
    set_continuation(thread, cont); 

  log_trace(jvmcont)("Jumping to frame (freeze):");
  print_vframe(f);
#ifdef ASSERT
  { ResourceMark rm(thread);
    assert (strcmp(frame_name(f), RUN_SIG) == 0, "name: %s", frame_name(f)); }
#endif

  // on success, fi describes the entry frame's caller (freeze_continuations advanced f)
  fi->sp = f.unextended_sp(); // java_lang_Continuation::entrySP(cont);
  fi->fp = f.fp();
  fi->pc = f.pc();
  // set_anchor(thread, fi);

  log_trace(jvmcont)("ENTRY: sp: %p fp: %p pc: %p", fi->sp, fi->fp, fi->pc);
  log_trace(jvmcont)("=== end of freeze");
JRT_END
1607 
// Copies one interpreted h-frame back onto the thread stack at vsp and builds
// a frame object for it. The fp-relative slots that were relativized at
// freeze time (last_sp, initial_sp, locals) are converted back to absolute
// addresses, and the sender-sp slot is patched to the caller's unextended sp.
static frame thaw_interpreted_frame(ContMirror& cont, hframe& hf, intptr_t* vsp, frame& sender) {
  RegisterMap dmap(NULL, false);

  // log_trace(jvmcont)("ZZZZ SENDER 111:"); print_vframe(sender, &dmap);

  intptr_t* hsp = cont.stack_address(hf.sp());
  cont.copy_from_stack(hsp, vsp, hf.size(cont));

  // log_trace(jvmcont)("ZZZZ SENDER 222:"); print_vframe(sender, &dmap);

  // the thawed fp sits at the same offset from vsp as the frozen fp from hsp
  intptr_t* hfp = cont.stack_address(hf.fp());
  intptr_t* vfp = vsp + (hfp - hsp);

  derelativize(vfp, frame::interpreter_frame_last_sp_offset);
  derelativize(vfp, frame::interpreter_frame_initial_sp_offset); // == block_top == block_bottom
  derelativize(vfp, frame::interpreter_frame_locals_offset);

  intptr_t* unextended_sp = *(intptr_t**)(vfp + frame::interpreter_frame_last_sp_offset);
  frame f(vsp, unextended_sp, vfp, hf.pc());

  patch_sender_sp(f, sender.unextended_sp()); // derelativize(vfp, frame::interpreter_frame_sender_sp_offset);

  assert (*(intptr_t**)(vfp + frame::interpreter_frame_locals_offset) < frame_top(sender), "sender top: %p locals: %p", 
    frame_top(sender), *(intptr_t**)(vfp + frame::interpreter_frame_locals_offset));

  assert(f.is_interpreted_frame_valid(cont.thread()), "invalid thawed frame");

  // bookkeeping: one fewer frozen frame (and one fewer interpreted frame)
  cont.dec_num_frames();
  cont.dec_num_interpreted_frames();

  return f;
}
1640 
// Copies one compiled h-frame back onto the thread stack at vsp (16-byte
// aligning vsp first on LP64 if needed) and builds a frame object for it.
// If the thawed frame should be deoptimized, it is deoptimized here and
// `deoptimized` is set so the caller can preserve the deopt return pc.
static frame thaw_compiled_frame(ContMirror& cont, hframe& hf, intptr_t* vsp, frame& sender, RegisterMap& map, bool &deoptimized) {
#ifdef _LP64
  if ((long)vsp % 16 != 0) {
    log_trace(jvmcont)("Aligning compiled frame: %p -> %p", vsp, vsp - 1);
    assert(sender.is_interpreted_frame(), "");
    vsp--;
  }
  assert((long)vsp % 16 == 0, "");
#endif

  if (Interpreter::contains(hf.return_pc(cont))) { // false if bottom-most frame, as the return address would be patched to NULL if interpreted
    cont.sub_size(sizeof(intptr_t)); // we do this whether or not we've aligned because we add it in freeze_interpreted_frame
  }
  if (sender.is_interpreted_frame()) {
    // int num_of_parameters = ((CompiledMethod*)hf.cb())->method()->size_of_parameters();
    // vsp -= num_of_parameters; // we may need this space for deopt TODO
    // cont.sub_size(sizeof(intptr_t) * num_of_parameters);
  }

  intptr_t* hsp = cont.stack_address(hf.sp());
  cont.copy_from_stack(hsp, vsp, hf.size(cont));

  frame f(vsp, (intptr_t*)hf.fp(), hf.pc());

    // TODO get nmethod. Call popNmethod if necessary
    // when copying nmethod frames, we need to check for them being made non-reentrant, in which case we need to deopt them
    // and turn them into interpreter frames.

  if (f.should_be_deoptimized() && !f.is_deoptimized_frame()) {
    log_trace(jvmcont)("Deoptimizing thawed frame");
    Deoptimization::deoptimize(cont.thread(), f, &map);
    deoptimized = true;
  }

  cont.dec_num_frames();

  return f;
}
1679 
// Restores the oops of the thawed frame f: walks f's oop locations with
// ThawOopClosure, which writes back num_oops oops starting at oop_index in
// the continuation's ref stack. Also repairs f's fp if the closure wrote an
// oop into the saved-link slot.
static void thaw_oops(ContMirror& cont, frame& f, int oop_index, int num_oops, void* target, RegisterMap& map) {
  log_trace(jvmcont)("Walking oops (thaw)");

  // log_trace(jvmcont)("is_top: %d", is_top);
  // assert (!is_top || cont.is_map_at_top(map), "");
  // assert (!is_top || f.is_interpreted_frame() || f.fp() == (intptr_t*)cont.fp(), "f.fp: %p cont.fp: 0x%lx", f.fp(), cont.fp());

  assert (!map.include_argument_oops(), "");

  // register the local fp copy as the saved-link location before walking,
  // so an oop spilled in rbp is processed too
  intptr_t* fp = f.fp();
  frame::update_map_with_saved_link(&map, &fp);

  ResourceMark rm(cont.thread()); // apparently, oop-mapping may require resource allocation
  ThawOopClosure oopClosure(&cont, &f, oop_index, num_oops, target, &map);
  f.oops_do(&oopClosure, NULL, &oopClosure, &map); // can overwrite cont.fp() (because of update_register_map)
  log_trace(jvmcont)("count: %d num_oops: %d", oopClosure.count(), num_oops);
  assert(oopClosure.count() == num_oops, "closure oop count different."); 

  // Thawing oops may have overwritten the link in the callee if rbp contained an oop (only possible if we're compiled).
  // This only matters when we're the top frame, as that's the value that will be restored into rbp when we jump to continue.
  if (fp != f.fp()) {
    log_trace(jvmcont)("WHOA link has changed f.fp: %p link: %p", f.fp(), fp);
    f.set_fp(fp);
  }

  log_trace(jvmcont)("Done walking oops");
}
1707 
// Thaws a single h-frame hf directly below the already-thawed sender on the
// thread stack, copying its contents and restoring its oops. If the sender
// (notably the entry frame) is or becomes deoptimized, the deopt return pc
// is captured before the copy and patched back in afterwards.
static frame thaw_frame(ContMirror& cont, hframe& hf, int oop_index, frame& sender, bool &deoptimized) {
  log_trace(jvmcont)("=============================");

  if (log_is_enabled(Trace, jvmcont)) hf.print(cont);

  // a non-zero uncompressed_size takes precedence over the h-stack size
  const int fsize = hf.uncompressed_size(cont) != 0 ? hf.uncompressed_size(cont) : hf.size(cont);
  const address bottom = (address) sender.sp();
  intptr_t* vsp = (intptr_t*)(bottom - fsize);

  cont.sub_size(fsize);

  log_trace(jvmcont)("hsp: %d hfp: 0x%lx is_bottom: %d", hf.sp(), hf.fp(), hf.is_bottom(cont));
  log_trace(jvmcont)("stack_length: %d", cont.stack_length());
  log_trace(jvmcont)("bottom: %p vsp: %p fsize: %d", bottom, vsp, fsize);

  bool is_sender_deopt = deoptimized;
  address* pc_addr = &(((address*) sender.sp())[-1]); // the return-pc slot just below the sender's sp

  if (is_entry_frame(cont, sender)) {
    assert (!is_sender_deopt, "");
    assert (!sender.is_deoptimized_frame(), "");
    if (sender.is_deoptimized_frame()) {
      log_trace(jvmcont)("Sender frame already deopted");
      is_sender_deopt = true;
    } else if (is_deopt_return(hf.return_pc(cont), sender)) {
      log_trace(jvmcont)("Entry frame deoptimized! pc: %p", sender.pc());
      *pc_addr = sender.pc(); // just to make the following call think we're walking the stack from the top
      sender.deoptimize(cont.thread());
      is_sender_deopt = true;
    }
  }

  address deopt_ret_pc = NULL;
  if (is_sender_deopt) {
    // this must be done before copying the frame, because the sender's sp might not be correct
    // for example, if a compiled frame calls an interpreted frame, its sp must point to a couple of words before 
    // the callee's fp, but we always create the frame so that sp = unextended_sp, and so the sp would point to 
    // before the callee's locals
    deopt_ret_pc = *pc_addr; // we grab the return pc written by deoptimize (about to be clobbered by thaw_x) to restore later
    log_trace(jvmcont)("Sender is deopt: %p", deopt_ret_pc);
    deoptimized = false;
  }

  RegisterMap map(cont.thread(), true, false);
  map.set_include_argument_oops(false);

  frame f = hf.is_interpreted_frame() ? thaw_interpreted_frame(cont, hf, vsp, sender)
                                      :    thaw_compiled_frame(cont, hf, vsp, sender, map, deoptimized);

  // re-link the thawed frame to its thawed sender and fix up its return pc
  patch_link(f, sender.fp(), hf.is_interpreted_frame());
  if (is_sender_deopt) {
    log_trace(jvmcont)("Patching sender deopt");
    patch_return_pc(f, deopt_ret_pc, hf.is_interpreted_frame());
  } else if (is_entry_frame(cont, sender)) {
    // the entry frame could have been compiled/deopted since we froze the bottom frame  XXXXXXXX
    patch_return_pc(f, sender.pc(), hf.is_interpreted_frame());
  }

  assert (!is_entry_frame(cont, sender) || sender.fp() == cont.entryFP(), "sender.fp: %p entryFP: %p", sender.fp(), cont.entryFP());

  thaw_oops(cont, f, oop_index, hf.num_oops(cont), f.sp(), map);

#ifndef PRODUCT
  RegisterMap dmap(NULL, false);
  print_vframe(f, &dmap);
#endif

  return f;
}
1777 
// Recursively thaws up to num_frames h-frames, senders first, and returns the
// topmost thawed frame. The recursion bottoms out at the entry frame (either
// num_frames exhausted or the h-stack empty), reporting the remaining h-stack
// position through last_oop_index/last_frame; count returns the number of
// frames thawed. When frozen frames remain, the bottom-most thawed frame gets
// the return-barrier stub as its return pc so the rest thaw lazily.
static frame thaw_frames(ContMirror& cont, hframe hf, int oop_index, int num_frames, int& count, int &last_oop_index, hframe& last_frame, bool& deoptimized) {
  if (num_frames == 0 || hf.is_empty()) {
    frame entry(cont.entrySP(), cont.entryFP(), cont.entryPC());
    log_trace(jvmcont)("Found entry:");
    print_vframe(entry);

  #ifdef ASSERT
    { ResourceMark rm(cont.thread());
      assert (strcmp(frame_name(entry), ENTER_SIG) == 0, "name: %s", frame_name(entry)); }
  #endif

    last_oop_index = oop_index;
    last_frame = hf;
    deoptimized = false;
    // cont.set_refSP(oop_index);
    // cont.set_last_frame(hf);
    return entry;
  }

  // thaw the sender first so this frame can be laid out on top of it
  hframe hsender = hf.sender(cont);
  frame sender = thaw_frames(cont, hsender, oop_index + hf.num_oops(cont), num_frames - 1, count, last_oop_index, last_frame, deoptimized);
  frame f = thaw_frame(cont, hf, oop_index, sender, deoptimized);

  assert ((count == 0) == is_entry_frame(cont, sender), ""); 
  assert (hf.is_bottom(cont) <= last_frame.is_empty(), "hf.is_bottom(cont): %d last_frame.is_empty(): %d ", hf.is_bottom(cont), last_frame.is_empty());
#ifdef ASSERT
  { ResourceMark rm(cont.thread());
    assert (!hf.is_bottom(cont) || strcmp(frame_name(f), ENTER0_SIG) == 0, "name: %s", frame_name(f)); }
#endif

  if (count == 0) {
    // this is the bottom-most frame thawed in this batch (its sender is the entry frame)
    assert (is_entry_frame(cont, sender), "");
    assert (!hf.is_bottom(cont) || hf.sender(cont).is_empty(), "");
    if (!hf.is_bottom(cont)) { // XXXXXX
      log_trace(jvmcont)("Setting return address to return barrier: %p", StubRoutines::cont_returnBarrier());
      patch_return_pc(f, StubRoutines::cont_returnBarrier(), f.is_interpreted_frame());
    } 
    // else {
    //   if (sender.is_interpreted_frame()) { // unnecessary now, thanks to enter0
    //     // We enter the continuation through an interface call (target.run()), but exit through a virtual call (doContinue())
    //     // Alternatively, wrap the call to target.run() inside a private method.
    //     patch_return_pc(f, Interpreter::return_entry(vtos, 0, Bytecodes::_invokevirtual), f.is_interpreted_frame());
    //   }
    // }
  }

  assert (!is_entry_frame(cont, sender) || (hf.is_bottom(cont) == last_frame.is_empty()), "hf.is_bottom(cont): %d last_frame.is_empty(): %d ", hf.is_bottom(cont), last_frame.is_empty());
  assert (!is_entry_frame(cont, sender) || (hf.is_bottom(cont) != Continuation::is_cont_bottom_frame(f)), "hf.is_bottom(cont): %d is_cont_bottom_frame(f): %d ", hf.is_bottom(cont), Continuation::is_cont_bottom_frame(f));
  assert (Continuation::is_cont_bottom_frame(f) <= (num_frames == 1), "num_frames: %d is_cont_bottom_frame(f): %d ", num_frames, Continuation::is_cont_bottom_frame(f));
 
  count++;
  return f;
}
1831 
1832 static inline int thaw_num_frames(bool return_barrier) {
1833   if (CONT_FULL_STACK) {
1834     assert (!return_barrier, "");
1835     return 10000;
1836   } 
1837   return return_barrier ? 1 : 2;
1838 }
1839 
1840 // fi->pc is the return address -- the entry
1841 // fi->sp is the top of the stack after thaw
1842 // fi->fp current rbp
1843 // called after preparations (stack overflow check and making room)
// Thaws the top num_frames h-frames of the thread's current continuation
// onto the thread stack and fills fi with the sp/fp/pc to resume at.
// prepare_thaw has already checked for stack overflow and made room.
static void thaw1(JavaThread* thread, FrameInfo* fi, const bool return_barrier) {
  if (return_barrier) log_trace(jvmcont)("== RETURN BARRIER");
  const int num_frames = thaw_num_frames(return_barrier);

  log_trace(jvmcont)("~~~~~~~~~ thaw %d", num_frames);
  log_trace(jvmcont)("pc: %p", fi->pc);
  log_trace(jvmcont)("rbp: %p", fi->fp);

  // address target = (address)fi->sp; // we leave fi->sp as-is

  oop contOop = get_continuation(thread);
  assert(contOop != NULL && oopDesc::is_oop_or_null(contOop), "Invalid cont: %p", (void*)contOop);

  ContMirror cont(thread, contOop);
  cont.read();
  // record where the entry frame sits; on the return-barrier path the entry pc
  // is already recorded, so only sp/fp are refreshed
  cont.set_entrySP(fi->sp);
  cont.set_entryFP(fi->fp);
  if (!return_barrier) { // not return barrier
    cont.set_entryPC(fi->pc);
  }

  DEBUG_ONLY(log_trace(jvmcont)("THAW ### #%lx", cont.hash()));

#ifdef ASSERT
  set_anchor(cont);
  // print_frames(thread);
#endif

  // log_trace(jvmcont)("thaw: TARGET: %p", target);
  // log_trace(jvmcont)("QQQ CCCCC bottom: %p top: %p size: %ld", cont.entrySP(), target, (address)cont.entrySP() - target);
  assert(num_frames > 0, "num_frames <= 0: %d", num_frames);

  assert(!cont.is_empty(), "no more frames");

  hframe hf = cont.last_frame();
  log_trace(jvmcont)("top_hframe before (thaw):");
  if (log_is_enabled(Trace, jvmcont)) hf.print_on(cont, tty);

  RegisterMap map(thread, true, false);
  map.set_include_argument_oops(false);
  assert (map.update_map(), "RegisterMap not set to update");

  // thaw the frames, senders first, then record what remains frozen
  DEBUG_ONLY(int orig_num_frames = cont.num_frames();)
  int frame_count = 0;
  int last_oop_index = 0;
  hframe last_frame;
  bool deoptimized = false;
  frame top = thaw_frames(cont, cont.last_frame(), cont.refSP(), num_frames, frame_count, last_oop_index, last_frame, deoptimized);
  cont.set_last_frame(last_frame);
  cont.set_refSP(last_oop_index);

  assert (!CONT_FULL_STACK || cont.is_empty(), "");  
  assert (cont.is_empty() == cont.last_frame().is_empty(), "cont.is_empty: %d cont.last_frame().is_empty(): %d", cont.is_empty(), cont.last_frame().is_empty());
  assert (cont.is_empty() == (cont.max_size() == 0), "cont.is_empty: %d cont.max_size: %lu", cont.is_empty(), cont.max_size());
  assert (cont.is_empty() <= (cont.refSP() == cont.refStack()->length()), "cont.is_empty: %d ref_sp: %d refStack.length: %d", cont.is_empty(), cont.refSP(), cont.refStack()->length());
  assert (cont.is_empty() == (cont.num_frames() == 0), "cont.is_empty: %d num_frames: %d", cont.is_empty(), cont.num_frames());
  assert (cont.is_empty() <= (cont.num_interpreted_frames() == 0), "cont.is_empty: %d num_interpreted_frames: %d", cont.is_empty(), cont.num_interpreted_frames());
  assert (cont.num_frames() == orig_num_frames - frame_count, "cont.is_empty: %d num_frames: %d orig_num_frames: %d frame_count: %d", cont.is_empty(), cont.num_frames(), orig_num_frames, frame_count);

  // hand the topmost thawed frame's register state back to the stub
  fi->sp = top.sp();
  fi->fp = top.fp();
  fi->pc = top.pc(); // we'll jump to the current continuation pc // Interpreter::return_entry(vtos, 0, Bytecodes::_invokestatic, true); // 

  log_trace(jvmcont)("thawed %d frames", frame_count);

  log_trace(jvmcont)("top_hframe after (thaw):");
  if (log_is_enabled(Trace, jvmcont)) cont.last_frame().print_on(cont, tty);

  cont.write();

#ifdef ASSERT
  set_anchor(thread, fi);
  print_frames(thread); // must be done after write(), as frame walking reads fields off the Java objects.
  clear_anchor(thread);
#endif

  log_trace(jvmcont)("cont sp: %d fp: %lx", cont.sp(), cont.fp());
  log_trace(jvmcont)("fi->sp: %p fi->fp: %p fi->pc: %p", fi->sp, fi->fp, fi->pc);

  log_trace(jvmcont)("Jumping to frame (thaw):");
  frame f = frame(fi->sp, fi->fp, fi->pc);
  print_vframe(f, NULL);

#ifdef ASSERT
  { ResourceMark rm(thread);
    assert (!CONT_FULL_STACK || strcmp(frame_name(f), YIELD_SIG) == 0, "name: %s", frame_name(f)); }
#endif

  DEBUG_ONLY(thread->_continuation = contOop;)

  log_trace(jvmcont)("=== End of thaw");
}
1936 
1937 // static size_t frames_size(oop cont, int frames) {
1938 //   size_t size = 0;
1939 //   int length = java_lang_Continuation::stack(cont)->length();
1940 //   int* hstack = (int*)java_lang_Continuation::stack_base(cont);
1941 //   int sp = java_lang_Continuation::sp(cont);
1942 //   // int fp = java_lang_Continuation::fp(cont);
1943 
1944 //   size = 8;
1945 //   bool last_interpreted = false;
1946 
1947 //   for (int i=0; i < frames && sp >= 0 && sp < length; i++) {
1948 //     HFrameMetadata* md = metadata(to_haddress(hstack, sp));
1949 //     size_t uncompressed_size = md->uncompressed_size;
1950 //     size_t fsize = md->frame_size; // (indices are to 32-bit words)
1951 
1952 //     size += uncompressed_size != 0 ? uncompressed_size : fsize;
1953 
1954 //     bool is_interpreted = uncompressed_size != 0;
1955 //     if (is_interpreted != last_interpreted) {
1956 //       size += 8;
1957 //       last_interpreted = is_interpreted;
1958 //     }
1959 
1960 //     sp += to_index(fsize + METADATA_SIZE);
1961 //     // fp += hstack[fp]; // contains offset to previous fp
1962 //   }
1963 //   log_trace(jvmcont)("frames_size: %lu", size);
1964 //   return size;
1965 // }
1966 
1967 static bool stack_overflow_check(JavaThread* thread, int size, address sp) {
1968   const int page_size = os::vm_page_size();
1969   if (size > page_size) {
1970     if (sp - size < thread->stack_overflow_limit()) {
1971       return false;
1972     }
1973   }
1974   return true;
1975 }
1976 
1977 // In: fi->sp = the sp of the entry frame
1978 // Out: returns the size of frames to thaw or 0 for no more frames or a stack overflow
1979 //      On failure: fi->sp - cont's entry SP
1980 //                  fi->fp - cont's entry FP
1981 //                  fi->pc - overflow? throw StackOverflowError : cont's entry PC
JRT_LEAF(int, Continuation::prepare_thaw(FrameInfo* fi, bool return_barrier))
  log_trace(jvmcont)("~~~~~~~~~ prepare_thaw");

  int num_frames = thaw_num_frames(return_barrier);

  log_trace(jvmcont)("prepare_thaw %d %d", return_barrier, num_frames);
  log_trace(jvmcont)("pc: %p", fi->pc);
  log_trace(jvmcont)("rbp: %p", fi->fp);

  const address bottom = (address)fi->sp; // os::current_stack_pointer(); points to the entry frame
  log_trace(jvmcont)("bottom: %p", bottom);

  JavaThread* thread = JavaThread::current();
  oop cont = get_continuation(thread);
  
  // if the entry frame is interpreted, it may leave a parameter on the stack, which would be left there if the return barrier is hit
  // assert ((address)java_lang_Continuation::entrySP(cont) - bottom <= 8, "bottom: %p, entrySP: %p", bottom, java_lang_Continuation::entrySP(cont));
  int size = java_lang_Continuation::maxSize(cont); // frames_size(cont, num_frames);
  if (size == 0) { // no more frames
    return 0;
  }
  // 300 bytes of slack on top of the frames' size -- presumably headroom for
  // the thaw code itself; TODO confirm the constant's derivation
  if (!stack_overflow_check(thread, size + 300, bottom)) {
    // overflow: report the StackOverflowError stub instead of the entry pc
    fi->pc = StubRoutines::throw_StackOverflowError_entry();
    return 0;
  }

  size += sizeof(intptr_t); // just in case we have an interpreted entry after which we need to align

  address target = bottom - size;
  log_trace(jvmcont)("target: %p", target);
  log_trace(jvmcont)("QQQ BBBBB bottom: %p top: %p size: %d", bottom, target, size);

  return size;
JRT_END
2016 
2017 // IN:  fi->sp = the future SP of the topmost thawed frame (where we'll copy the thawed frames)
2018 // Out: fi->sp = the SP of the topmost thawed frame -- the one we will resume at
2019 //      fi->fp = the FP " ...
2020 //      fi->pc = the PC " ...
2021 // JRT_ENTRY(void, Continuation::thaw(JavaThread* thread, FrameInfo* fi, int num_frames))
2022 JRT_LEAF(void, Continuation::thaw(FrameInfo* fi, bool return_barrier))
2023   thaw1(JavaThread::current(), fi, return_barrier);
2024 JRT_END
2025 
// Cached Method* of the continuation entry method; lazily resolved by Continuation::entry_method().
Method* Continuation::_entry_method = NULL;
2027 
// Lazily resolves and caches java.lang.Continuation.enter in _entry_method.
// Initialization is unsynchronized -- per the note in Continuation::freeze it
// is "potentially racy, but benign". May leave a pending exception on THREAD
// if resolution fails (callers check has_pending_exception()).
Method* Continuation::entry_method(Thread* THREAD) {
  if (_entry_method == NULL) {
    CallInfo callinfo;
    Klass* recvrKlass = SystemDictionary::resolve_or_null(vmSymbols::java_lang_Continuation(), THREAD); // SystemDictionary::Continuation_klass();
    LinkInfo link_info(recvrKlass, vmSymbols::enter_name(), vmSymbols::continuationEnter_signature());
    LinkResolver::resolve_special_call(callinfo, Handle(), link_info, THREAD);
    methodHandle method = callinfo.selected_method();
    assert(method.not_null(), "should have thrown exception");
    _entry_method = method();
  }
  return _entry_method;
}
2040 
2041 bool Continuation::is_continuation_entry_frame(const frame& f) {
2042   Method* m = frame_method(f);
2043   if (m == NULL)
2044     return false;
2045 
2046   // we can do this because the entry frame is never inlined
2047   return m == _entry_method;
2048 }
2049 // When walking the virtual stack, this method returns true
2050 // iff the frame is a thawed continuation frame whose
2051 // caller is still frozen on the h-stack.
2052 // The continuation object can be extracted from the thread.
2053 bool Continuation::is_cont_bottom_frame(const frame& f) {
2054   return is_return_barrier_entry(return_pc(f, f.is_interpreted_frame()));
2055 }
2056 
2057 static oop find_continuation_for_frame(JavaThread* thread, intptr_t* const sp) {
2058   oop cont = get_continuation(thread);
2059   while (cont != NULL && java_lang_Continuation::entrySP(cont) < sp) 
2060     cont = java_lang_Continuation::parent(cont);
2061   return cont;
2062 }
2063 
2064 address Continuation::get_entry_pc_past_barrier(JavaThread* thread, const frame& f) {
2065   log_trace(jvmcont)("YEYEYEYEYEYEYEEYEY");
2066   oop cont = find_continuation_for_frame(thread, f.sp());
2067   assert (cont != NULL, "");
2068   return java_lang_Continuation::entryPC(cont);
2069 }
2070 
// Returns true iff pc is the continuation return-barrier stub's address.
bool Continuation::is_return_barrier_entry(address pc) {
  return pc == StubRoutines::cont_returnBarrier();
}
2074 
2075 address Continuation::sender_pc_past_barrier(JavaThread* thread, const frame& f) {
2076   return is_return_barrier_entry(f.pc()) ? get_entry_pc_past_barrier(thread, f) : f.pc();
2077 }
2078 
2079 frame Continuation::fix_continuation_bottom_sender(const frame& callee, frame f, RegisterMap* map) {
2080   if (map->thread() != NULL && is_cont_bottom_frame(callee)) {
2081     f.set_pc_preserve_deopt(get_entry_pc_past_barrier(map->thread(), f));
2082   }
2083   return f;
2084 }
2085 
2086 ///// DEBUGGING
2087 
2088 static void print_oop(void *p, oop obj, outputStream* st) {
2089   if (!log_is_enabled(Trace, jvmcont)) return;
2090 
2091   st->print_cr(INTPTR_FORMAT ": ", p2i(p));
2092   if (obj == NULL) {
2093     st->print_cr("*NULL*"); 
2094   } else {
2095     if (oopDesc::is_oop_or_null(obj)) {
2096       if (obj->is_objArray()) {
2097         st->print_cr("valid objArray: " INTPTR_FORMAT, p2i(obj));
2098       } else {
2099         obj->print_value_on(st);
2100         // obj->print();
2101       }
2102     } else {
2103       st->print_cr("invalid oop: " INTPTR_FORMAT, p2i(obj));
2104     }
2105     st->cr();
2106   }
2107 }
2108 
// Prints a detailed dump of frame f: for interpreted frames, the individual
// fp-relative layout slots (link, sps, bcp, locals, ...); for compiled/C
// frames, the code blob, link and 'real' return pc. A NULL st defaults to
// tty; an explicit st prints only when jvmcont trace logging is enabled.
static void print_vframe(frame f, RegisterMap* map, outputStream* st) {
  if (st != NULL && !log_is_enabled(Trace, jvmcont) ) return;
  if (st == NULL) st = tty;

  st->print_cr("\tfp: %p real_fp: %p, sp: %p pc: %p usp: %p", f.fp(), f.real_fp(), f.sp(), f.pc(), f.unextended_sp());

  f.print_on(st);

  // st->print("\tpc: "); os::print_location(st, *(intptr_t*)f.pc());
  intptr_t* fp = f.fp();
  st->print("cb: ");
  if (f.cb() == NULL) {
    st->print_cr("NULL");
    return;
  }
  f.cb()->print_value_on(st); st->cr();
  if (f.is_interpreted_frame()) {
    Method* method = f.interpreter_frame_method();
    st->print_cr("\tinterpreted");
    st->print("\tMethod (at: %p): ", fp + frame::interpreter_frame_method_offset); method->print_short_name(st); st->cr();
    st->print_cr("\tcode_size: %d",         method->code_size());
    // st->print_cr("base: %p end: %p", method->constMethod()->code_base(), method->constMethod()->code_end());
    intptr_t** link_address = (intptr_t**)(fp + frame::link_offset);
    st->print_cr("\tlink: %p (at: %p)",    *link_address, link_address);
    st->print_cr("\treturn_pc: %p",        *(void**)(fp + frame::return_addr_offset));
    st->print_cr("\tssp: %p",              (void*)  (fp + frame::sender_sp_offset));
    st->print_cr("\tissp: %p",             *(void**)(fp + frame::interpreter_frame_sender_sp_offset));
    st->print_cr("\tlast_sp: %p",          *(void**)(fp + frame::interpreter_frame_last_sp_offset));
    st->print_cr("\tinitial_sp: %p",       *(void**)(fp + frame::interpreter_frame_initial_sp_offset));
    // st->print_cr("\tmon_block_top: %p",    *(void**)(fp + frame::interpreter_frame_monitor_block_top_offset));
    // st->print_cr("\tmon_block_bottom: %p", *(void**)(fp + frame::interpreter_frame_monitor_block_bottom_offset));
    st->print_cr("\tlocals: %p",           *(void**)(fp + frame::interpreter_frame_locals_offset));
    st->print_cr("\texpression_stack_size: %d", f.interpreter_frame_expression_stack_size());
    // st->print_cr("\tcomputed expression_stack_size: %d", interpreter_frame_expression_stack_size(f));
    st->print_cr("\tcache: %p",            *(void**)(fp + frame::interpreter_frame_cache_offset));
    st->print_cr("\tbcp: %p",              *(void**)(fp + frame::interpreter_frame_bcp_offset));
    st->print_cr("\tbci: %d",               method->bci_from(*(address*)(fp + frame::interpreter_frame_bcp_offset)));
    st->print_cr("\tmirror: %p",           *(void**)(fp + frame::interpreter_frame_mirror_offset));
    // st->print("\tmirror: "); os::print_location(st, *(intptr_t*)(fp + frame::interpreter_frame_mirror_offset), true);
    st->print("\treturn_pc: "); os::print_location(st, *(intptr_t*)(fp + frame::return_addr_offset));
  } else {
    st->print_cr("\tcompiled/C");
    if (f.is_compiled_frame())
      st->print_cr("\torig_pc: %p",    f.cb()->as_nmethod()->get_original_pc(&f));
    // st->print_cr("\torig_pc_address: %p", f.cb()->as_nmethod()->orig_pc_addr(&f));
    // st->print_cr("\tlink: %p",       (void*)f.at(frame::link_offset));
    // st->print_cr("\treturn_pc: %p",  *(void**)(fp + frame::return_addr_offset));   
    // st->print_cr("\tssp: %p",        *(void**)(fp + frame::sender_sp_offset));
    st->print_cr("\tcb.size: %d",    f.cb()->frame_size());
    intptr_t** link_address = (intptr_t**)(f.real_fp() - frame::sender_sp_offset);
    st->print_cr("\tlink: %p (at: %p)", *link_address, link_address);
    st->print_cr("\t'real' return_pc: %p",  *(void**)(f.real_fp() - 1)); 
    st->print("\t'real' return_pc: "); os::print_location(st, *(intptr_t*)(f.real_fp() - 1));   
    // st->print("\treturn_pc: "); os::print_location(st, *(intptr_t*)(fp + frame::return_addr_offset));
  }
  // disabled frame-contents dump (flip the `false` to enable)
  if (false && map != NULL) {
    intptr_t* bottom = frame_bottom(f);
    intptr_t* usp = frame_top(f);
    long fsize = (address)bottom - (address)usp;
    st->print_cr("\tsize: %ld", fsize);
    st->print_cr("\tbounds: %p - %p", usp, bottom);

    if (false) {
      st->print_cr("--data--");
      for(int i=0; i<fsize; i++)
        st->print_cr("%p: %x", ((address)usp + i), *((address)usp + i));
      st->print_cr("--end data--");
    }
  }
}
2179 
// Prints the thread's physical stack frames, from the last Java frame down
// to (but not including) the first entry frame. Non-PRODUCT builds collect
// the frames via FrameValues::describe; PRODUCT builds print each frame with
// print_vframe. A NULL st defaults to tty; an explicit st prints only when
// jvmcont trace logging is enabled.
static void print_frames(JavaThread* thread, outputStream* st) {
  if (st != NULL && !log_is_enabled(Trace, jvmcont) ) return;
  if (st == NULL) st = tty;

  if (true) {
    st->print_cr("------- frames ---------");
    RegisterMap map(thread, false);
  #ifndef PRODUCT
    ResourceMark rm;
    FrameValues values;
  #endif

    int i = 0;
    for (frame f = thread->last_frame(); !f.is_entry_frame(); f = f.sender(&map)) {
  #ifndef PRODUCT
      f.describe(values, i);
  #else
      print_vframe(f, &map, st);
  #endif 
      i++;
    }
  #ifndef PRODUCT
    values.print(thread);
  #endif
    st->print_cr("======= end frames =========");
  }
}
2207 
2208 void ContMirror::print_hframes(outputStream* st) {
2209   if (st != NULL && !log_is_enabled(Trace, jvmcont)) return;
2210   if (st == NULL) st = tty;
2211 
2212   st->print_cr("------- hframes ---------");
2213   st->print_cr("sp: %d length: %d", _sp, _stack_length);
2214   int i = 0;
2215   for (hframe f = last_frame(); !f.is_empty(); f = f.sender(*this)) {
2216     st->print_cr("frame: %d", i);
2217     f.print_on(*this, st);
2218     i++;
2219   }
2220   st->print_cr("======= end hframes =========");
2221 }
2222 
2223 #ifdef ASSERT
2224 // Does a reverse lookup of a RegisterMap. Returns the register, if any, spilled at the given address.
2225 static VMReg find_register_spilled_here(void* p, RegisterMap* map) {
2226   for(int i = 0; i < RegisterMap::reg_count; i++) {
2227     VMReg r = VMRegImpl::as_VMReg(i);
2228     if (p == map->location(r)) return r;
2229   }
2230   return NULL;
2231 }
2232 #endif