AOSP source tag: android-9.0.0_r3
// Default constructor: eagerly captures the current thread's stack at
// construction time by delegating to fillInStackTrace().
public Throwable() {
    fillInStackTrace();
}
// Captures the current stack into the opaque native {@code backtrace} field.
// synchronized: serializes against getOurStackTrace(), which reads and
// clears the same stackTrace/backtrace pair.
public synchronized Throwable fillInStackTrace() {
    // (Re)capture only when the object already holds trace state -- either a
    // materialized stackTrace or a raw native backtrace.
    // NOTE(review): this is an excerpt; the full upstream method may do more
    // here (e.g. reset stackTrace) -- confirm against the complete source.
    if (stackTrace != null || backtrace != null /* Out of protocol state */ ) {
        // Native call walks the stack and returns an opaque trace object.
        backtrace = nativeFillInStackTrace();
    }
    return this;
}
/art/runtime/native/java_lang_Throwable.cc
// JNI entry for Throwable.nativeFillInStackTrace(): walks the calling
// thread's stack and returns it as an opaque managed object (the "internal"
// trace), which the Java side stores in Throwable.backtrace.
static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) {
  ScopedFastNativeObjectAccess soa(env);
  // <false> = kTransactionActive off: normal (non-transactional) allocation.
  return soa.Self()->CreateInternalStackTrace<false>(soa);
}
// Builds the opaque "internal" stack trace object for this thread.
// Two-pass strategy: a counting walk that also caches up to kMaxSavedFrames
// (ArtMethod*, dex pc) pairs, then a build phase that replays the cache when
// it was large enough -- avoiding a second full stack walk in the common case.
template<bool kTransactionActive>
jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
  // Compute depth of stack, save frames if possible to avoid needing to recompute many.
  constexpr size_t kMaxSavedFrames = 256;
  std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]);
  FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this),
                                       &saved_frames[0],
                                       kMaxSavedFrames);
  count_visitor.WalkStack();
  const uint32_t depth = count_visitor.GetDepth();
  // Frames the visitor decided to skip (e.g. trace-building machinery itself
  // -- exact policy lives in FetchStackTraceVisitor; not visible here).
  const uint32_t skip_depth = count_visitor.GetSkipDepth();
  // Build internal stack trace.
  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
                                                                         const_cast<Thread*>(this),
                                                                         skip_depth);
  if (!build_trace_visitor.Init(depth)) {
    return nullptr;  // Allocation failed.
  }
  // If we saved all of the frames we don't even need to do the actual stack walk. This is faster
  // than doing the stack walk twice.
  if (depth < kMaxSavedFrames) {
    // Replay the frames cached during the counting pass.
    for (size_t i = 0; i < depth; ++i) {
      build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second);
    }
  } else {
    // Deep stack: the cache may be incomplete, so walk again for real.
    build_trace_visitor.WalkStack();
  }
  // NOTE(review): the excerpt is truncated here -- the full function goes on
  // to return the built internal trace object.
/**
 * Returns this throwable's stack trace as a defensive copy: callers may
 * freely mutate the returned array without affecting this object.
 */
public StackTraceElement[] getStackTrace() {
    final StackTraceElement[] trace = getOurStackTrace();
    return trace.clone();
}
// Lazily decodes the native {@code backtrace} into StackTraceElement[] on the
// first request, then releases the native form. synchronized keeps the
// stackTrace/backtrace hand-off atomic with respect to fillInStackTrace().
private synchronized StackTraceElement[] getOurStackTrace() {
    // Initialize stack trace field with information from
    // backtrace if this is the first call to this method
    //
    // Android-changed: test explicitly for equality with
    // STACK_TRACE_ELEMENT
    if (stackTrace == libcore.util.EmptyArray.STACK_TRACE_ELEMENT ||
        (stackTrace == null && backtrace != null) /* Out of protocol state */) {
        // Native call converts the opaque backtrace into printable elements.
        stackTrace = nativeGetStackTrace(backtrace);
        // Drop the native-form trace; no longer needed once decoded.
        backtrace = null;
    }
    return stackTrace;
}
/art/runtime/native/java_lang_Throwable.cc
namespace art {
// JNI entry for Throwable.nativeGetStackTrace(): converts the opaque internal
// trace (javaStackState, i.e. the value stored in Throwable.backtrace) into a
// java.lang.StackTraceElement[] for the managed side.
static jobjectArray Throwable_nativeGetStackTrace(JNIEnv* env, jclass, jobject javaStackState) {
  ScopedFastNativeObjectAccess soa(env);
  return Thread::InternalStackTraceToStackTraceElementArray(soa, javaStackState);
}
/**
 * Returns this thread's stack trace, or the shared zero-length array when the
 * VM reports none (e.g. the thread has already exited).
 */
public StackTraceElement[] getStackTrace() {
    final StackTraceElement[] frames = VMStack.getThreadStackTrace(this);
    if (frames == null) {
        return EmptyArray.STACK_TRACE_ELEMENT;
    }
    return frames;
}
61 /**
62 * Retrieves the stack trace from the specified thread.
63 *
64 * @param t
65 * thread of interest
66 * @return an array of stack trace elements, or null if the thread
67 * doesn't have a stack trace (e.g. because it exited)
68 */
69 @FastNative
70 native public static StackTraceElement[] getThreadStackTrace(Thread t);
// JNI entry for VMStack.getThreadStackTrace(Thread): captures the target
// thread's stack as an internal trace, then converts it to a
// StackTraceElement[]. Returns nullptr when no trace could be captured.
static jobjectArray VMStack_getThreadStackTrace(JNIEnv* env, jclass, jobject javaThread) {
  ScopedFastNativeObjectAccess soa(env);
  // Callback run against the (suspended) target thread to build the
  // opaque internal trace object.
  auto capture = [](Thread* target, const ScopedFastNativeObjectAccess& access)
      REQUIRES_SHARED(Locks::mutator_lock_) -> jobject {
    return target->CreateInternalStackTrace<false>(access);
  };
  jobject internal_trace = GetThreadStack(soa, javaThread, capture);
  return (internal_trace == nullptr)
      ? nullptr
      : Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
}
/art/runtime/thread.cc
// Converts an "internal" stack trace object (as built by
// CreateInternalStackTrace) into a java.lang.StackTraceElement[].
// If output_array is non-null it is reused (writes truncated to its length);
// otherwise a new array is allocated. When stack_depth is non-null it receives
// the number of frames written. Returns nullptr on allocation failure.
jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
    const ScopedObjectAccessAlreadyRunnable& soa,
    jobject internal,
    jobjectArray output_array,
    int* stack_depth) {
  // Decode the internal stack trace into the depth, method trace and PC trace.
  // Subtract one for the methods and PC trace.
  int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  jobjectArray result;
  if (output_array != nullptr) {
    // Reuse the array we were given.
    result = output_array;
    // ...adjusting the number of frames we'll write to not exceed the array length.
    const int32_t traces_length =
        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength();
    depth = std::min(depth, traces_length);
  } else {
    // Create java_trace array and place in local reference table
    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
        class_linker->AllocStackTraceElementArray(soa.Self(), depth);
    if (java_traces == nullptr) {
      return nullptr;
    }
    result = soa.AddLocalReference<jobjectArray>(java_traces);
  }
  if (stack_depth != nullptr) {
    *stack_depth = depth;
  }
  for (int32_t i = 0; i < depth; ++i) {
    // NOTE(review): `internal` is re-decoded on every iteration rather than
    // hoisted out of the loop -- presumably because CreateStackTraceElement
    // below allocates, and a moving GC would invalidate a cached ObjPtr.
    // Confirm before "optimizing" this.
    ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces =
        soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>();
    // Methods and dex PC trace is element 0.
    DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
    ObjPtr<mirror::PointerArray> const method_trace =
        ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(decoded_traces->Get(0)));
    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
    // Element 0 packs both traces: first half holds ArtMethod*, second half
    // holds the matching dex pcs (hence the GetLength()/2 offset).
    ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
    uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
        i + method_trace->GetLength() / 2, kRuntimePointerSize);
    ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(soa, method, dex_pc);
    if (obj == nullptr) {
      return nullptr;  // Allocation of an element failed.
    }
    // We are called from native: use non-transactional mode.
    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(i, obj);
  }
  return result;
}
// Builds a single java.lang.StackTraceElement for (method, dex_pc): resolves
// declaring-class name, source file and line number, allocating the required
// managed strings. Returns nullptr if any allocation fails (with a pending
// OOM exception asserted where noted).
static ObjPtr<mirror::StackTraceElement> CreateStackTraceElement(
    const ScopedObjectAccessAlreadyRunnable& soa,
    ArtMethod* method,
    uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
  int32_t line_number;
  // Handles keep the managed strings visible to (and updated by) the GC
  // across the allocations below.
  StackHandleScope<3> hs(soa.Self());
  auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
  auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
  if (method->IsProxyMethod()) {
    // Proxy methods carry no dex code, so there is no line number to map.
    line_number = -1;
    class_name_object.Assign(method->GetDeclaringClass()->GetName());
    // source_name_object intentionally left null for proxy methods
  } else {
    line_number = method->GetLineNumFromDexPC(dex_pc);
    // Allocate element, potentially triggering GC
    // TODO: reuse class_name_object via Class::name_?
    const char* descriptor = method->GetDeclaringClassDescriptor();
    CHECK(descriptor != nullptr);
    // Convert the JVM descriptor (e.g. "Ljava/lang/Object;") to a readable name.
    std::string class_name(PrettyDescriptor(descriptor));
    class_name_object.Assign(
        mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
    if (class_name_object == nullptr) {
      soa.Self()->AssertPendingOOMException();
      return nullptr;
    }
    const char* source_file = method->GetDeclaringClassSourceFile();
    if (line_number == -1) {
      // Make the line_number field of StackTraceElement hold the dex pc.
      // source_name_object is intentionally left null if we failed to map the dex pc to
      // a line number (most probably because there is no debug info). See b/30183883.
      line_number = dex_pc;
    } else {
      if (source_file != nullptr) {
        source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
        if (source_name_object == nullptr) {
          soa.Self()->AssertPendingOOMException();
          return nullptr;
        }
      }
    }
  }
  // For proxies this reports the implemented interface's method name instead
  // of the synthetic proxy method.
  const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
  CHECK(method_name != nullptr);
  Handle<mirror::String> method_name_object(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
  if (method_name_object == nullptr) {
    return nullptr;
  }
  return mirror::StackTraceElement::Alloc(soa.Self(),
                                          class_name_object,
                                          method_name_object,
                                          source_name_object,
                                          line_number);
}