GP-2677: Introduce TraceRmi (API only, experimental)

Dan 2023-04-21 16:17:59 -04:00
parent 0fe70e15fa
commit 1de4dfc9c7
96 changed files with 19314 additions and 214 deletions

.gitignore (vendored): 1 line changed
View File

@@ -68,6 +68,7 @@ Release
.classpath
.settings/
.prefs
.pydevproject
# Ignore XTEXT generated dirs/files
*/*/*/*/xtend-gen

View File

@@ -20,6 +20,7 @@ apply from: "$rootProject.projectDir/gradle/nativeProject.gradle"
apply from: "$rootProject.projectDir/gradle/distributableGhidraModule.gradle"
apply from: "$rootProject.projectDir/gradle/debugger/hasExecutableJar.gradle"
apply from: "$rootProject.projectDir/gradle/debugger/hasPythonPackage.gradle"
apply plugin: 'eclipse'
eclipse.project.name = 'Debug Debugger-agent-gdb'
@@ -33,6 +34,8 @@ dependencies {
testImplementation project(path: ':Framework-AsyncComm', configuration: 'testArtifacts')
testImplementation project(path: ':Framework-Debugging', configuration: 'testArtifacts')
testImplementation project(path: ':Debugger-gadp', configuration: 'testArtifacts')
pypkgInstall project(path: ':Debugger-rmi-trace', configuration: 'pypkgInstall')
}
tasks.nodepJar {

View File

@@ -1,7 +1,13 @@
##VERSION: 2.0
##MODULE IP: JSch License
DEVNOTES.txt||GHIDRA||||END|
Module.manifest||GHIDRA||||END|
data/scripts/fallback_info_proc_mappings.gdb||GHIDRA||||END|
data/scripts/fallback_maintenance_info_sections.gdb||GHIDRA||||END|
data/scripts/getpid-linux-i386.gdb||GHIDRA||||END|
data/scripts/wine32_info_proc_mappings.gdb||GHIDRA||||END|
src/main/py/LICENSE||GHIDRA||||END|
src/main/py/README.md||GHIDRA||||END|
src/main/py/ghidragdb/schema.xml||GHIDRA||||END|
src/main/py/pyproject.toml||GHIDRA||||END|
src/main/py/tests/EMPTY||GHIDRA||||END|

View File

@@ -21,8 +21,10 @@ public interface GdbBreakpointInsertions {
/**
* Insert a breakpoint
*
* <p>
* This is equivalent to the CLI command: {@code break [LOC]}, or {@code watch [LOC]}, etc.
*
* <p>
* Breakpoints in GDB can get pretty complicated. Depending on the location specification, the
* actual location of the breakpoint may change during the lifetime of an inferior. Take note of
* the breakpoint number to track those changes across breakpoint modification events.

View File

@@ -0,0 +1,11 @@
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,3 @@
# Ghidra Trace RMI
Package for connecting GDB to Ghidra via Trace RMI.
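A minimal usage sketch (illustration only, not part of this file), assuming the package is installed where GDB's embedded Python can import it. Importing it registers the `ghidra trace ...` commands referenced by the modules below, plus the `ghidra-language` and `ghidra-compiler` parameters:

    (gdb) python import ghidragdb
    (gdb) show ghidra-language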

View File

@@ -0,0 +1,16 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from . import util, commands, parameters

View File

@@ -0,0 +1,287 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from ghidratrace.client import Address, RegVal
import gdb
# NOTE: This map is derived from the ldefs using a script
language_map = {
'aarch64': ['AARCH64:BE:64:v8A', 'AARCH64:LE:64:AppleSilicon', 'AARCH64:LE:64:v8A'],
'aarch64:ilp32': ['AARCH64:BE:32:ilp32', 'AARCH64:LE:32:ilp32', 'AARCH64:LE:64:AppleSilicon'],
'arm_any': ['ARM:BE:32:v8', 'ARM:BE:32:v8T', 'ARM:LE:32:v8', 'ARM:LE:32:v8T'],
'armv2': ['ARM:BE:32:v4', 'ARM:LE:32:v4'],
'armv2a': ['ARM:BE:32:v4', 'ARM:LE:32:v4'],
'armv3': ['ARM:BE:32:v4', 'ARM:LE:32:v4'],
'armv3m': ['ARM:BE:32:v4', 'ARM:LE:32:v4'],
'armv4': ['ARM:BE:32:v4', 'ARM:LE:32:v4'],
'armv4t': ['ARM:BE:32:v4t', 'ARM:LE:32:v4t'],
'armv5': ['ARM:BE:32:v5', 'ARM:LE:32:v5'],
'armv5t': ['ARM:BE:32:v5t', 'ARM:LE:32:v5t'],
'armv5tej': ['ARM:BE:32:v5t', 'ARM:LE:32:v5t'],
'armv6': ['ARM:BE:32:v6', 'ARM:LE:32:v6'],
'armv6-m': ['ARM:BE:32:Cortex', 'ARM:LE:32:Cortex'],
'armv6k': ['ARM:BE:32:Cortex', 'ARM:LE:32:Cortex'],
'armv6kz': ['ARM:BE:32:Cortex', 'ARM:LE:32:Cortex'],
'armv6s-m': ['ARM:BE:32:Cortex', 'ARM:LE:32:Cortex'],
'armv7': ['ARM:BE:32:v7', 'ARM:LE:32:v7'],
'armv7e-m': ['ARM:LE:32:Cortex'],
'armv8-a': ['ARM:BE:32:v8', 'ARM:LE:32:v8'],
'armv8-m.base': ['ARM:BE:32:v8', 'ARM:LE:32:v8'],
'armv8-m.main': ['ARM:BE:32:v8', 'ARM:LE:32:v8'],
'armv8-r': ['ARM:BE:32:v8', 'ARM:LE:32:v8'],
'armv8.1-m.main': ['ARM:BE:32:v8', 'ARM:LE:32:v8'],
'avr:107': ['avr8:LE:24:xmega'],
'avr:31': ['avr8:LE:16:default'],
'avr:51': ['avr8:LE:16:atmega256'],
'avr:6': ['avr8:LE:16:atmega256'],
'hppa2.0w': ['pa-risc:BE:32:default'],
'i386:intel': ['x86:LE:32:default'],
'i386:x86-64': ['x86:LE:64:default'],
'i386:x86-64:intel': ['x86:LE:64:default'],
'i8086': ['x86:LE:16:Protected Mode', 'x86:LE:16:Real Mode'],
'iwmmxt': ['ARM:BE:32:v7', 'ARM:BE:32:v8', 'ARM:BE:32:v8T', 'ARM:LE:32:v7', 'ARM:LE:32:v8', 'ARM:LE:32:v8T'],
'm68hc12': ['HC-12:BE:16:default'],
'm68k': ['68000:BE:32:default'],
'm68k:68020': ['68000:BE:32:MC68020'],
'm68k:68030': ['68000:BE:32:MC68030'],
'm9s12x': ['HCS-12:BE:24:default', 'HCS-12X:BE:24:default'],
'mips:4000': ['MIPS:BE:32:default', 'MIPS:LE:32:default'],
'mips:5000': ['MIPS:BE:64:64-32addr', 'MIPS:BE:64:default', 'MIPS:LE:64:64-32addr', 'MIPS:LE:64:default'],
'mips:micromips': ['MIPS:BE:32:micro'],
'msp:430X': ['TI_MSP430:LE:16:default'],
'powerpc:403': ['PowerPC:BE:32:4xx', 'PowerPC:LE:32:4xx'],
'powerpc:MPC8XX': ['PowerPC:BE:32:MPC8270', 'PowerPC:BE:32:QUICC', 'PowerPC:LE:32:QUICC'],
'powerpc:common': ['PowerPC:BE:32:default', 'PowerPC:LE:32:default'],
'powerpc:common64': ['PowerPC:BE:64:64-32addr', 'PowerPC:BE:64:default', 'PowerPC:LE:64:64-32addr', 'PowerPC:LE:64:default'],
'powerpc:e500': ['PowerPC:BE:32:e500', 'PowerPC:LE:32:e500'],
'powerpc:e500mc': ['PowerPC:BE:64:A2ALT', 'PowerPC:LE:64:A2ALT'],
'powerpc:e500mc64': ['PowerPC:BE:64:A2-32addr', 'PowerPC:BE:64:A2ALT-32addr', 'PowerPC:LE:64:A2-32addr', 'PowerPC:LE:64:A2ALT-32addr'],
'riscv:rv32': ['RISCV:LE:32:RV32G', 'RISCV:LE:32:RV32GC', 'RISCV:LE:32:RV32I', 'RISCV:LE:32:RV32IC', 'RISCV:LE:32:RV32IMC', 'RISCV:LE:32:default'],
'riscv:rv64': ['RISCV:LE:64:RV64G', 'RISCV:LE:64:RV64GC', 'RISCV:LE:64:RV64I', 'RISCV:LE:64:RV64IC', 'RISCV:LE:64:default'],
'sh4': ['SuperH4:BE:32:default', 'SuperH4:LE:32:default'],
'sparc:v9b': ['sparc:BE:32:default', 'sparc:BE:64:default'],
'xscale': ['ARM:BE:32:v6', 'ARM:LE:32:v6'],
'z80': ['z80:LE:16:default', 'z8401x:LE:16:default']
}
data64_compiler_map = {
None: 'pointer64',
}
x86_compiler_map = {
'GNU/Linux': 'gcc',
'Windows': 'Visual Studio',
# This may seem wrong, but Ghidra cspecs really describe the ABI
'Cygwin': 'Visual Studio',
}
compiler_map = {
'DATA:BE:64:default': data64_compiler_map,
'DATA:LE:64:default': data64_compiler_map,
'x86:LE:32:default': x86_compiler_map,
'x86:LE:64:default': x86_compiler_map,
}
def get_arch():
return gdb.selected_inferior().architecture().name()
def get_endian():
parm = gdb.parameter('endian')
if parm != 'auto':
return parm
# Once again, we have to hack using the human-readable 'show'
show = gdb.execute('show endian', to_string=True)
if 'little' in show:
return 'little'
if 'big' in show:
return 'big'
return 'unrecognized'
def get_osabi():
parm = gdb.parameter('osabi')
if not parm in ['auto', 'default']:
return parm
    # We have to hack around the fact that GDB won't give us the current OS ABI
# via the API if it is "auto" or "default". Using "show", we can get it, but
# we have to parse output meant for a human. The current value will be on
# the top line, delimited by double quotes. It will be the last delimited
# thing on that line. ("auto" may appear earlier on the line.)
show = gdb.execute('show osabi', to_string=True)
line = show.split('\n')[0]
return line.split('"')[-2]
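# Illustration only (not part of the commit): the parsing hack above applied
# to sample output; the exact wording of 'show osabi' may vary by GDB version.
#   sample = 'The current OS ABI is "auto" (currently "GNU/Linux").'
#   sample.split('\n')[0].split('"')[-2]   # -> 'GNU/Linux'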
def compute_ghidra_language():
# First, check if the parameter is set
lang = gdb.parameter('ghidra-language')
if lang != 'auto':
return lang
# Get the list of possible languages for the arch. We'll need to sift
# through them by endian and probably prefer default/simpler variants. The
# heuristic for "simpler" will be 'default' then shortest variant id.
arch = get_arch()
endian = get_endian()
lebe = ':BE:' if endian == 'big' else ':LE:'
if not arch in language_map:
return 'DATA' + lebe + '64:default'
langs = language_map[arch]
matched_endian = sorted(
(l for l in langs if lebe in l),
key=lambda l: 0 if l.endswith(':default') else len(l)
)
if len(matched_endian) > 0:
return matched_endian[0]
# NOTE: I'm disinclined to fall back to a language match with wrong endian.
return 'DATA' + lebe + '64:default'
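# Illustration only (not part of the commit): a worked example of the sift
# above. For arch 'aarch64' on a little-endian target the ':LE:' candidates
# are ['AARCH64:LE:64:AppleSilicon', 'AARCH64:LE:64:v8A']; neither ends in
# ':default', so the shortest variant id wins:
#   sorted((l for l in language_map['aarch64'] if ':LE:' in l),
#          key=lambda l: 0 if l.endswith(':default') else len(l))[0]
#   # -> 'AARCH64:LE:64:v8A'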
def compute_ghidra_compiler(lang):
# First, check if the parameter is set
comp = gdb.parameter('ghidra-compiler')
if comp != 'auto':
return comp
# Check if the selected lang has specific compiler recommendations
if not lang in compiler_map:
return 'default'
comp_map = compiler_map[lang]
osabi = get_osabi()
if osabi in comp_map:
return comp_map[osabi]
if None in comp_map:
return comp_map[None]
return 'default'
def compute_ghidra_lcsp():
lang = compute_ghidra_language()
comp = compute_ghidra_compiler(lang)
return lang, comp
class DefaultMemoryMapper(object):
def __init__(self, defaultSpace):
self.defaultSpace = defaultSpace
def map(self, inf: gdb.Inferior, offset: int):
if inf.num == 1:
space = self.defaultSpace
else:
space = f'{self.defaultSpace}{inf.num}'
return self.defaultSpace, Address(space, offset)
def map_back(self, inf: gdb.Inferior, address: Address) -> int:
if address.space == self.defaultSpace and inf.num == 1:
return address.offset
if address.space == f'{self.defaultSpace}{inf.num}':
return address.offset
raise ValueError(f"Address {address} is not in inferior {inf.num}")
DEFAULT_MEMORY_MAPPER = DefaultMemoryMapper('ram')
memory_mappers = {}
def compute_memory_mapper(lang):
if not lang in memory_mappers:
return DEFAULT_MEMORY_MAPPER
return memory_mappers[lang]
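# Illustration only (not part of the commit): the default mapper keeps
# inferior 1 in the 'ram' space and moves inferior N > 1 into 'ramN'. For a
# hypothetical inferior numbered 2:
#   DEFAULT_MEMORY_MAPPER.map(inf2, 0x1000)
#   # -> ('ram', Address('ram2', 0x1000))
#   DEFAULT_MEMORY_MAPPER.map_back(inf2, Address('ram2', 0x1000))
#   # -> 0x1000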
class DefaultRegisterMapper(object):
def __init__(self, byte_order):
if not byte_order in ['big', 'little']:
raise ValueError("Invalid byte_order: {}".format(byte_order))
self.byte_order = byte_order
self.union_winners = {}
def map_name(self, inf, name):
return name
def convert_value(self, value, type=None):
if type is None:
type = value.dynamic_type.strip_typedefs()
l = type.sizeof
# l - 1 because array() takes the max index, inclusive
# NOTE: Might like to pre-lookup 'unsigned char', but it depends on the
# architecture *at the time of lookup*.
cv = value.cast(gdb.lookup_type('unsigned char').array(l - 1))
rng = range(l)
if self.byte_order == 'little':
rng = reversed(rng)
return bytes(cv[i] for i in rng)
def map_value(self, inf, name, value):
try:
av = self.convert_value(value)
except gdb.error as e:
raise gdb.GdbError("Cannot convert {}'s value: '{}', type: '{}'"
.format(name, value, value.type))
return RegVal(self.map_name(inf, name), av)
def map_name_back(self, inf, name):
return name
def map_value_back(self, inf, name, value):
return RegVal(self.map_name_back(inf, name), value)
class Intel_x86_64_RegisterMapper(DefaultRegisterMapper):
def __init__(self):
super().__init__('little')
def map_name(self, inf, name):
if name == 'eflags':
return 'rflags'
if name.startswith('zmm'):
# Ghidra only goes up to ymm, right now
return 'ymm' + name[3:]
return super().map_name(inf, name)
def map_value(self, inf, name, value):
rv = super().map_value(inf, name, value)
if rv.name.startswith('ymm') and len(rv.value) > 32:
return RegVal(rv.name, rv.value[-32:])
return rv
    def map_name_back(self, inf, name):
        if name == 'rflags':
            return 'eflags'
        return super().map_name_back(inf, name)
DEFAULT_BE_REGISTER_MAPPER = DefaultRegisterMapper('big')
DEFAULT_LE_REGISTER_MAPPER = DefaultRegisterMapper('little')
register_mappers = {
'x86:LE:64:default': Intel_x86_64_RegisterMapper()
}
def compute_register_mapper(lang):
if not lang in register_mappers:
if ':BE:' in lang:
return DEFAULT_BE_REGISTER_MAPPER
if ':LE:' in lang:
return DEFAULT_LE_REGISTER_MAPPER
return register_mappers[lang]
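# Illustration only (not part of the commit): name translation performed by
# the x86-64 mapper above (values are likewise truncated so zmm reads fit
# Ghidra's ymm registers):
#   m = register_mappers['x86:LE:64:default']
#   m.map_name(None, 'eflags')       # -> 'rflags'
#   m.map_name(None, 'zmm3')         # -> 'ymm3'
#   m.map_name_back(None, 'rflags')  # -> 'eflags'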

File diff suppressed because it is too large

View File

@@ -0,0 +1,540 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import time
import gdb
from . import commands
class GhidraHookPrefix(gdb.Command):
"""Commands for exporting data to a Ghidra trace"""
def __init__(self):
super().__init__('ghidra-hook', gdb.COMMAND_NONE, prefix=True)
GhidraHookPrefix()
class HookState(object):
__slots__ = ('installed', 'mem_catchpoint', 'batch')
def __init__(self):
self.installed = False
self.mem_catchpoint = None
self.batch = None
def ensure_batch(self):
if self.batch is None:
self.batch = commands.STATE.client.start_batch()
def end_batch(self):
if self.batch is None:
return
commands.STATE.client.end_batch()
self.batch = None
class InferiorState(object):
__slots__ = ('first', 'regions', 'modules', 'threads', 'breaks', 'visited')
def __init__(self):
self.first = True
# For things we can detect changes to between stops
self.regions = False
self.modules = False
self.threads = False
self.breaks = False
# For frames and threads that have already been synced since last stop
self.visited = set()
def record(self, description=None):
first = self.first
self.first = False
if description is not None:
commands.STATE.trace.snapshot(description)
if first:
commands.put_inferiors()
commands.put_environment()
if self.threads:
commands.put_threads()
self.threads = False
thread = gdb.selected_thread()
if thread is not None:
if first or thread not in self.visited:
commands.put_frames()
self.visited.add(thread)
frame = gdb.selected_frame()
hashable_frame = (thread, frame.level())
if first or hashable_frame not in self.visited:
commands.putreg(frame, frame.architecture().registers())
commands.putmem("$pc", "1", from_tty=False)
commands.putmem("$sp", "1", from_tty=False)
self.visited.add(hashable_frame)
if first or self.regions or self.threads or self.modules:
# Sections, memory syscalls, or stack allocations
commands.put_regions()
self.regions = False
if first or self.modules:
commands.put_modules()
self.modules = False
if first or self.breaks:
commands.put_breakpoints()
self.breaks = False
def record_continued(self):
commands.put_inferiors()
commands.put_threads()
def record_exited(self, exit_code):
inf = gdb.selected_inferior()
ipath = commands.INFERIOR_PATTERN.format(infnum=inf.num)
infobj = commands.STATE.trace.proxy_object_path(ipath)
infobj.set_value('_exit_code', exit_code)
infobj.set_value('_state', 'TERMINATED')
class BrkState(object):
__slots__ = ('break_loc_counts',)
def __init__(self):
self.break_loc_counts = {}
def update_brkloc_count(self, b, count):
self.break_loc_counts[b] = count
def get_brkloc_count(self, b):
return self.break_loc_counts.get(b, 0)
def del_brkloc_count(self, b):
if b not in self.break_loc_counts:
return 0 # TODO: Print a warning?
count = self.break_loc_counts[b]
del self.break_loc_counts[b]
return count
HOOK_STATE = HookState()
BRK_STATE = BrkState()
INF_STATES = {}
def on_new_inferior(event):
trace = commands.STATE.trace
if trace is None:
return
HOOK_STATE.ensure_batch()
with trace.open_tx("New Inferior {}".format(event.inferior.num)):
commands.put_inferiors() # TODO: Could put just the one....
def on_inferior_selected():
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
HOOK_STATE.ensure_batch()
with trace.open_tx("Inferior {} selected".format(inf.num)):
INF_STATES[inf.num].record()
commands.activate()
def on_inferior_deleted(event):
trace = commands.STATE.trace
if trace is None:
return
if event.inferior.num in INF_STATES:
del INF_STATES[event.inferior.num]
HOOK_STATE.ensure_batch()
with trace.open_tx("Inferior {} deleted".format(event.inferior.num)):
commands.put_inferiors() # TODO: Could just delete the one....
def on_new_thread(event):
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
INF_STATES[inf.num].threads = True
# TODO: Syscall clone/exit to detect thread destruction?
def on_thread_selected():
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
t = gdb.selected_thread()
HOOK_STATE.ensure_batch()
with trace.open_tx("Thread {}.{} selected".format(inf.num, t.num)):
INF_STATES[inf.num].record()
commands.activate()
def on_frame_selected():
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
t = gdb.selected_thread()
f = gdb.selected_frame()
HOOK_STATE.ensure_batch()
with trace.open_tx("Frame {}.{}.{} selected".format(inf.num, t.num, f.level())):
INF_STATES[inf.num].record()
commands.activate()
def on_syscall_memory():
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
INF_STATES[inf.num].regions = True
def on_memory_changed(event):
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
HOOK_STATE.ensure_batch()
with trace.open_tx("Memory *0x{:08x} changed".format(event.address)):
commands.put_bytes(event.address, event.address + event.length,
pages=False, is_mi=False, from_tty=False)
def on_register_changed(event):
gdb.write("Register changed: {}".format(dir(event)))
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
# I'd rather have a descriptor!
# TODO: How do I get the descriptor from the number?
# For now, just record the lot
HOOK_STATE.ensure_batch()
with trace.open_tx("Register {} changed".format(event.regnum)):
commands.putreg(event.frame, event.frame.architecture().registers())
def on_cont(event):
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
state = INF_STATES[inf.num]
HOOK_STATE.ensure_batch()
with trace.open_tx("Continued"):
state.record_continued()
def on_stop(event):
if hasattr(event, 'breakpoints') and HOOK_STATE.mem_catchpoint in event.breakpoints:
return
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
state = INF_STATES[inf.num]
state.visited.clear()
HOOK_STATE.ensure_batch()
with trace.open_tx("Stopped"):
state.record("Stopped")
commands.put_event_thread()
commands.activate()
HOOK_STATE.end_batch()
def on_exited(event):
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
state = INF_STATES[inf.num]
state.visited.clear()
description = "Exited"
if hasattr(event, 'exit_code'):
description += " with code {}".format(event.exit_code)
HOOK_STATE.ensure_batch()
with trace.open_tx(description):
state.record(description)
if hasattr(event, 'exit_code'):
state.record_exited(event.exit_code)
commands.put_event_thread()
commands.activate()
HOOK_STATE.end_batch()
def notify_others_breaks(inf):
for num, state in INF_STATES.items():
if num != inf.num:
state.breaks = True
def modules_changed():
# Assumption: affects the current inferior
inf = gdb.selected_inferior()
if inf.num not in INF_STATES:
return
INF_STATES[inf.num].modules = True
def on_clear_objfiles(event):
modules_changed()
def on_new_objfile(event):
modules_changed()
def on_free_objfile(event):
modules_changed()
def on_breakpoint_created(b):
inf = gdb.selected_inferior()
notify_others_breaks(inf)
if inf.num not in INF_STATES:
return
trace = commands.STATE.trace
if trace is None:
return
ibpath = commands.INF_BREAKS_PATTERN.format(infnum=inf.num)
HOOK_STATE.ensure_batch()
with trace.open_tx("Breakpoint {} created".format(b.number)):
ibobj = trace.create_object(ibpath)
# Do not use retain_values or it'll remove other locs
commands.put_single_breakpoint(b, ibobj, inf, [])
ibobj.insert()
def on_breakpoint_modified(b):
inf = gdb.selected_inferior()
notify_others_breaks(inf)
if inf.num not in INF_STATES:
return
old_count = BRK_STATE.get_brkloc_count(b)
trace = commands.STATE.trace
if trace is None:
return
ibpath = commands.INF_BREAKS_PATTERN.format(infnum=inf.num)
HOOK_STATE.ensure_batch()
with trace.open_tx("Breakpoint {} modified".format(b.number)):
ibobj = trace.create_object(ibpath)
commands.put_single_breakpoint(b, ibobj, inf, [])
new_count = BRK_STATE.get_brkloc_count(b)
# NOTE: Location may not apply to inferior, but whatever.
for i in range(new_count, old_count):
ikey = commands.INF_BREAK_KEY_PATTERN.format(
breaknum=b.number, locnum=i+1)
ibobj.set_value(ikey, None)
def on_breakpoint_deleted(b):
inf = gdb.selected_inferior()
notify_others_breaks(inf)
if inf.num not in INF_STATES:
return
old_count = BRK_STATE.del_brkloc_count(b)
trace = commands.STATE.trace
if trace is None:
return
bpath = commands.BREAKPOINT_PATTERN.format(breaknum=b.number)
ibobj = trace.proxy_object_path(
commands.INF_BREAKS_PATTERN.format(infnum=inf.num))
HOOK_STATE.ensure_batch()
with trace.open_tx("Breakpoint {} modified".format(b.number)):
trace.proxy_object_path(bpath).remove(tree=True)
for i in range(old_count):
ikey = commands.INF_BREAK_KEY_PATTERN.format(
breaknum=b.number, locnum=i+1)
ibobj.set_value(ikey, None)
def on_before_prompt():
HOOK_STATE.end_batch()
# This will be called by a catchpoint
class GhidraTraceEventMemoryCommand(gdb.Command):
def __init__(self):
super().__init__('ghidra-hook event-memory', gdb.COMMAND_NONE)
def invoke(self, argument, from_tty):
self.dont_repeat()
on_syscall_memory()
GhidraTraceEventMemoryCommand()
def cmd_hook(name):
def _cmd_hook(func):
class _ActiveCommand(gdb.Command):
def __init__(self):
# It seems we can't hook commands using the Python API....
super().__init__(f"ghidra-hook def-{name}", gdb.COMMAND_USER)
gdb.execute(f"""
define {name}
ghidra-hook def-{name}
end
""")
def invoke(self, argument, from_tty):
self.dont_repeat()
func()
def _unhook_command():
gdb.execute(f"""
define {name}
end
""")
func.hook = _ActiveCommand
func.unhook = _unhook_command
return func
return _cmd_hook
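# Illustration only (not part of the commit): for name='hookpost-inferior',
# the decorator above makes GDB execute the equivalent of
#   define hookpost-inferior
#     ghidra-hook def-hookpost-inferior
#   end
# so GDB's user-defined hook fires after every 'inferior' command and calls
# back into the decorated Python function.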
@cmd_hook('hookpost-inferior')
def hook_inferior():
on_inferior_selected()
@cmd_hook('hookpost-thread')
def hook_thread():
on_thread_selected()
@cmd_hook('hookpost-frame')
def hook_frame():
on_frame_selected()
# TODO: Checks and workarounds for events missing in gdb 8
def install_hooks():
if HOOK_STATE.installed:
return
HOOK_STATE.installed = True
gdb.events.new_inferior.connect(on_new_inferior)
hook_inferior.hook()
gdb.events.inferior_deleted.connect(on_inferior_deleted)
gdb.events.new_thread.connect(on_new_thread)
hook_thread.hook()
hook_frame.hook()
# Respond to user-driven state changes: (Not target-driven)
gdb.events.memory_changed.connect(on_memory_changed)
gdb.events.register_changed.connect(on_register_changed)
# Respond to target-driven memory map changes:
# group:memory is actually a bit broad, but will probably port better
# One alternative is to name all syscalls that cause a change....
# Ones we could probably omit:
# msync,
# (Deals in syncing file-backed pages to disk.)
# mlock, munlock, mlockall, munlockall, mincore, madvise,
# (Deal in paging. Doesn't affect valid addresses.)
# mbind, get_mempolicy, set_mempolicy, migrate_pages, move_pages
# (All NUMA stuff)
#
if HOOK_STATE.mem_catchpoint is not None:
HOOK_STATE.mem_catchpoint.enabled = True
else:
breaks_before = set(gdb.breakpoints())
gdb.execute("""
catch syscall group:memory
commands
silent
ghidra-hook event-memory
cont
end
""")
HOOK_STATE.mem_catchpoint = (
set(gdb.breakpoints()) - breaks_before).pop()
gdb.events.cont.connect(on_cont)
gdb.events.stop.connect(on_stop)
gdb.events.exited.connect(on_exited) # Inferior exited
gdb.events.clear_objfiles.connect(on_clear_objfiles)
gdb.events.free_objfile.connect(on_free_objfile)
gdb.events.new_objfile.connect(on_new_objfile)
gdb.events.breakpoint_created.connect(on_breakpoint_created)
gdb.events.breakpoint_deleted.connect(on_breakpoint_deleted)
gdb.events.breakpoint_modified.connect(on_breakpoint_modified)
gdb.events.before_prompt.connect(on_before_prompt)
def remove_hooks():
if not HOOK_STATE.installed:
return
HOOK_STATE.installed = False
gdb.events.new_inferior.disconnect(on_new_inferior)
hook_inferior.unhook()
gdb.events.inferior_deleted.disconnect(on_inferior_deleted)
gdb.events.new_thread.disconnect(on_new_thread)
hook_thread.unhook()
hook_frame.unhook()
gdb.events.memory_changed.disconnect(on_memory_changed)
gdb.events.register_changed.disconnect(on_register_changed)
HOOK_STATE.mem_catchpoint.enabled = False
gdb.events.cont.disconnect(on_cont)
gdb.events.stop.disconnect(on_stop)
gdb.events.exited.disconnect(on_exited) # Inferior exited
gdb.events.clear_objfiles.disconnect(on_clear_objfiles)
gdb.events.free_objfile.disconnect(on_free_objfile)
gdb.events.new_objfile.disconnect(on_new_objfile)
gdb.events.breakpoint_created.disconnect(on_breakpoint_created)
gdb.events.breakpoint_deleted.disconnect(on_breakpoint_deleted)
gdb.events.breakpoint_modified.disconnect(on_breakpoint_modified)
gdb.events.before_prompt.disconnect(on_before_prompt)
def enable_current_inferior():
inf = gdb.selected_inferior()
INF_STATES[inf.num] = InferiorState()
def disable_current_inferior():
inf = gdb.selected_inferior()
if inf.num in INF_STATES:
# Silently ignore already disabled
del INF_STATES[inf.num]

View File

@@ -0,0 +1,653 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from concurrent.futures import Future, Executor
import re
from ghidratrace import sch
from ghidratrace.client import MethodRegistry, ParamDesc, Address, AddressRange
import gdb
from . import commands, hooks, util
class GdbExecutor(Executor):
def submit(self, fn, *args, **kwargs):
fut = Future()
def _exec():
try:
result = fn(*args, **kwargs)
hooks.HOOK_STATE.end_batch()
fut.set_result(result)
except Exception as e:
fut.set_exception(e)
gdb.post_event(_exec)
return fut
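# Illustration only (not part of the commit): submitting work through the
# executor schedules it on GDB's event loop via gdb.post_event; the Future
# should only be waited on from a thread other than GDB's main thread.
#   ex = GdbExecutor()
#   fut = ex.submit(gdb.execute, 'info inferiors', to_string=True)
#   print(fut.result())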
REGISTRY = MethodRegistry(GdbExecutor())
def extre(base, ext):
return re.compile(base.pattern + ext)
AVAILABLE_PATTERN = re.compile('Available\[(?P<pid>\\d*)\]')
BREAKPOINT_PATTERN = re.compile('Breakpoints\[(?P<breaknum>\\d*)\]')
BREAK_LOC_PATTERN = extre(BREAKPOINT_PATTERN, '\[(?P<locnum>\\d*)\]')
INFERIOR_PATTERN = re.compile('Inferiors\[(?P<infnum>\\d*)\]')
INF_BREAKS_PATTERN = extre(INFERIOR_PATTERN, '\.Breakpoints')
ENV_PATTERN = extre(INFERIOR_PATTERN, '\.Environment')
THREADS_PATTERN = extre(INFERIOR_PATTERN, '\.Threads')
THREAD_PATTERN = extre(THREADS_PATTERN, '\[(?P<tnum>\\d*)\]')
STACK_PATTERN = extre(THREAD_PATTERN, '\.Stack')
FRAME_PATTERN = extre(STACK_PATTERN, '\[(?P<level>\\d*)\]')
REGS_PATTERN = extre(FRAME_PATTERN, '.Registers')
MEMORY_PATTERN = extre(INFERIOR_PATTERN, '\.Memory')
MODULES_PATTERN = extre(INFERIOR_PATTERN, '\.Modules')
def find_availpid_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
pid = int(mat['pid'])
return pid
def find_availpid_by_obj(object):
return find_availpid_by_pattern(AVAILABLE_PATTERN, object, "an Available")
def find_inf_by_num(infnum):
for inf in gdb.inferiors():
if inf.num == infnum:
return inf
raise KeyError(f"Inferiors[{infnum}] does not exist")
def find_inf_by_pattern(object, pattern, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
infnum = int(mat['infnum'])
return find_inf_by_num(infnum)
def find_inf_by_obj(object):
return find_inf_by_pattern(object, INFERIOR_PATTERN, "an Inferior")
def find_inf_by_infbreak_obj(object):
return find_inf_by_pattern(object, INF_BREAKS_PATTERN,
"a BreakpointLocationContainer")
def find_inf_by_env_obj(object):
return find_inf_by_pattern(object, ENV_PATTERN, "an Environment")
def find_inf_by_threads_obj(object):
return find_inf_by_pattern(object, THREADS_PATTERN, "a ThreadContainer")
def find_inf_by_mem_obj(object):
return find_inf_by_pattern(object, MEMORY_PATTERN, "a Memory")
def find_inf_by_modules_obj(object):
return find_inf_by_pattern(object, MODULES_PATTERN, "a ModuleContainer")
def find_thread_by_num(inf, tnum):
for t in inf.threads():
if t.num == tnum:
return t
raise KeyError(f"Inferiors[{inf.num}].Threads[{tnum}] does not exist")
def find_thread_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
infnum = int(mat['infnum'])
tnum = int(mat['tnum'])
inf = find_inf_by_num(infnum)
return find_thread_by_num(inf, tnum)
def find_thread_by_obj(object):
return find_thread_by_pattern(THREAD_PATTERN, object, "a Thread")
def find_thread_by_stack_obj(object):
return find_thread_by_pattern(STACK_PATTERN, object, "a Stack")
def find_frame_by_level(thread, level):
# Because threads don't have any attribute to get at frames
thread.switch()
f = gdb.selected_frame()
# Navigate up or down, because I can't just get by level
down = level - f.level()
while down > 0:
f = f.older()
if f is None:
raise KeyError(
f"Inferiors[{thread.inferior.num}].Threads[{thread.num}].Stack[{level}] does not exist")
down -= 1
while down < 0:
f = f.newer()
if f is None:
raise KeyError(
f"Inferiors[{thread.inferior.num}].Threads[{thread.num}].Stack[{level}] does not exist")
down += 1
assert f.level() == level
return f
def find_frame_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
infnum = int(mat['infnum'])
tnum = int(mat['tnum'])
level = int(mat['level'])
inf = find_inf_by_num(infnum)
t = find_thread_by_num(inf, tnum)
return find_frame_by_level(t, level)
def find_frame_by_obj(object):
return find_frame_by_pattern(FRAME_PATTERN, object, "a StackFrame")
def find_frame_by_regs_obj(object):
return find_frame_by_pattern(REGS_PATTERN, object,
"a RegisterValueContainer")
# Because there's no method to get a register by name....
def find_reg_by_name(f, name):
for reg in f.architecture().registers():
if reg.name == name:
return reg
raise KeyError(f"No such register: {name}")
# Oof. no gdb/Python method to get breakpoint by number
# I could keep my own cache in a dict, but why?
def find_bpt_by_number(breaknum):
# TODO: If len exceeds some threshold, use binary search?
for b in gdb.breakpoints():
if b.number == breaknum:
return b
raise KeyError(f"Breakpoints[{breaknum}] does not exist")
def find_bpt_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
breaknum = int(mat['breaknum'])
return find_bpt_by_number(breaknum)
def find_bpt_by_obj(object):
return find_bpt_by_pattern(BREAKPOINT_PATTERN, object, "a BreakpointSpec")
def find_bptlocnum_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
        raise TypeError(f"{object} is not {err_msg}")
breaknum = int(mat['breaknum'])
locnum = int(mat['locnum'])
return breaknum, locnum
def find_bptlocnum_by_obj(object):
return find_bptlocnum_by_pattern(BREAK_LOC_PATTERN, object,
"a BreakpointLocation")
def find_bpt_loc_by_obj(object):
breaknum, locnum = find_bptlocnum_by_obj(object)
bpt = find_bpt_by_number(breaknum)
# Requires gdb-13.1 or later
return bpt.locations[locnum - 1] # Display is 1-up
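# Illustration only (not part of the commit): a trace path like
# 'Breakpoints[2][1]' matches BREAK_LOC_PATTERN with breaknum=2, locnum=1,
# which resolves to bpt.locations[0], i.e. what GDB displays as '2.1'.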
def switch_inferior(inferior):
if gdb.selected_inferior().num == inferior.num:
return
gdb.execute("inferior {}".format(inferior.num))
@REGISTRY.method
def execute(cmd: str, to_string: bool=False):
"""Execute a CLI command."""
return gdb.execute(cmd, to_string=to_string)
@REGISTRY.method(action='refresh')
def refresh_available(node: sch.Schema('AvailableContainer')):
"""List processes on gdb's host system."""
with commands.open_tracked_tx('Refresh Available'):
gdb.execute('ghidra trace put-available')
@REGISTRY.method(action='refresh')
def refresh_breakpoints(node: sch.Schema('BreakpointContainer')):
"""
Refresh the list of breakpoints (including locations for the current
inferior).
"""
with commands.open_tracked_tx('Refresh Breakpoints'):
gdb.execute('ghidra trace put-breakpoints')
@REGISTRY.method(action='refresh')
def refresh_inferiors(node: sch.Schema('InferiorContainer')):
"""Refresh the list of inferiors."""
with commands.open_tracked_tx('Refresh Inferiors'):
gdb.execute('ghidra trace put-inferiors')
@REGISTRY.method(action='refresh')
def refresh_inf_breakpoints(node: sch.Schema('BreakpointLocationContainer')):
"""
Refresh the breakpoint locations for the inferior.
In the course of refreshing the locations, the breakpoint list will also be
refreshed.
"""
switch_inferior(find_inf_by_infbreak_obj(node))
with commands.open_tracked_tx('Refresh Breakpoint Locations'):
gdb.execute('ghidra trace put-breakpoints')
@REGISTRY.method(action='refresh')
def refresh_environment(node: sch.Schema('Environment')):
"""Refresh the environment descriptors (arch, os, endian)."""
switch_inferior(find_inf_by_env_obj(node))
with commands.open_tracked_tx('Refresh Environment'):
gdb.execute('ghidra trace put-environment')
@REGISTRY.method(action='refresh')
def refresh_threads(node: sch.Schema('ThreadContainer')):
"""Refresh the list of threads in the inferior."""
switch_inferior(find_inf_by_threads_obj(node))
with commands.open_tracked_tx('Refresh Threads'):
gdb.execute('ghidra trace put-threads')
@REGISTRY.method(action='refresh')
def refresh_stack(node: sch.Schema('Stack')):
"""Refresh the backtrace for the thread."""
find_thread_by_stack_obj(node).switch()
with commands.open_tracked_tx('Refresh Stack'):
gdb.execute('ghidra trace put-frames')
@REGISTRY.method(action='refresh')
def refresh_registers(node: sch.Schema('RegisterValueContainer')):
"""Refresh the register values for the frame."""
find_frame_by_regs_obj(node).select()
# TODO: Groups?
with commands.open_tracked_tx('Refresh Registers'):
gdb.execute('ghidra trace putreg')
@REGISTRY.method(action='refresh')
def refresh_mappings(node: sch.Schema('Memory')):
"""Refresh the list of memory regions for the inferior."""
switch_inferior(find_inf_by_mem_obj(node))
with commands.open_tracked_tx('Refresh Memory Regions'):
gdb.execute('ghidra trace put-regions')
@REGISTRY.method(action='refresh')
def refresh_modules(node: sch.Schema('ModuleContainer')):
"""
Refresh the modules and sections list for the inferior.
This will refresh the sections for all modules, not just the selected one.
"""
switch_inferior(find_inf_by_modules_obj(node))
with commands.open_tracked_tx('Refresh Modules'):
gdb.execute('ghidra trace put-modules')
@REGISTRY.method(action='activate')
def activate_inferior(inferior: sch.Schema('Inferior')):
"""Switch to the inferior."""
switch_inferior(find_inf_by_obj(inferior))
@REGISTRY.method(action='activate')
def activate_thread(thread: sch.Schema('Thread')):
"""Switch to the thread."""
find_thread_by_obj(thread).switch()
@REGISTRY.method(action='activate')
def activate_frame(frame: sch.Schema('StackFrame')):
"""Select the frame."""
find_frame_by_obj(frame).select()
@REGISTRY.method
def add_inferior(container: sch.Schema('InferiorContainer')):
"""Add a new inferior."""
gdb.execute('add-inferior')
@REGISTRY.method(action='delete')
def delete_inferior(inferior: sch.Schema('Inferior')):
"""Remove the inferior."""
inf = find_inf_by_obj(inferior)
gdb.execute(f'remove-inferior {inf.num}')
# TODO: Separate method for each of core, exec, remote, etc...?
@REGISTRY.method
def connect(inferior: sch.Schema('Inferior'), spec: str):
"""Connect to a target machine or process."""
switch_inferior(find_inf_by_obj(inferior))
gdb.execute(f'target {spec}')
@REGISTRY.method(action='attach')
def attach_obj(inferior: sch.Schema('Inferior'), target: sch.Schema('Attachable')):
"""Attach the inferior to the given target."""
switch_inferior(find_inf_by_obj(inferior))
pid = find_availpid_by_obj(target)
gdb.execute(f'attach {pid}')
@REGISTRY.method(action='attach')
def attach_pid(inferior: sch.Schema('Inferior'), pid: int):
"""Attach the inferior to the given target."""
switch_inferior(find_inf_by_obj(inferior))
gdb.execute(f'attach {pid}')
@REGISTRY.method
def detach(inferior: sch.Schema('Inferior')):
"""Detach the inferior's target."""
switch_inferior(find_inf_by_obj(inferior))
gdb.execute('detach')
@REGISTRY.method(action='launch')
def launch_main(inferior: sch.Schema('Inferior'),
file: ParamDesc(str, display='File'),
args: ParamDesc(str, display='Arguments')=''):
"""
Start a native process with the given command line, stopping at 'main'
(start).
If 'main' is not defined in the file, this behaves like 'run'.
"""
switch_inferior(find_inf_by_obj(inferior))
gdb.execute(f'''
file {file}
set args {args}
start
''')
@REGISTRY.method(action='launch', condition=util.GDB_VERSION.major >= 9)
def launch_loader(inferior: sch.Schema('Inferior'),
file: ParamDesc(str, display='File'),
args: ParamDesc(str, display='Arguments')=''):
"""
Start a native process with the given command line, stopping at first
instruction (starti).
"""
switch_inferior(find_inf_by_obj(inferior))
gdb.execute(f'''
file {file}
set args {args}
starti
''')
@REGISTRY.method(action='launch')
def launch_run(inferior: sch.Schema('Inferior'),
file: ParamDesc(str, display='File'),
args: ParamDesc(str, display='Arguments')=''):
"""
Run a native process with the given command line (run).
The process will not stop until it hits one of your breakpoints, or it is
signaled.
"""
switch_inferior(find_inf_by_obj(inferior))
gdb.execute(f'''
file {file}
set args {args}
run
''')
@REGISTRY.method
def kill(inferior: sch.Schema('Inferior')):
"""Kill execution of the inferior."""
switch_inferior(find_inf_by_obj(inferior))
gdb.execute('kill')
@REGISTRY.method
def resume(inferior: sch.Schema('Inferior')):
"""Continue execution of the inferior."""
switch_inferior(find_inf_by_obj(inferior))
gdb.execute('continue')
@REGISTRY.method
def interrupt():
"""Interrupt the execution of the debugged program."""
gdb.execute('interrupt')
@REGISTRY.method
def step_into(thread: sch.Schema('Thread'), n: ParamDesc(int, display='N')=1):
"""Step one instruction exactly (stepi)."""
find_thread_by_obj(thread).switch()
gdb.execute('stepi')
@REGISTRY.method
def step_over(thread: sch.Schema('Thread'), n: ParamDesc(int, display='N')=1):
"""Step one instruction, but proceed through subroutine calls (nexti)."""
find_thread_by_obj(thread).switch()
gdb.execute('nexti')
@REGISTRY.method
def step_out(thread: sch.Schema('Thread')):
"""Execute until the current stack frame returns (finish)."""
find_thread_by_obj(thread).switch()
gdb.execute('finish')
@REGISTRY.method(action='step_ext')
def step_advance(thread: sch.Schema('Thread'), address: Address):
"""Continue execution up to the given address (advance)."""
t = find_thread_by_obj(thread)
t.switch()
offset = thread.trace.memory_mapper.map_back(t.inferior, address)
gdb.execute(f'advance *0x{offset:x}')
@REGISTRY.method(action='step_ext')
def step_return(thread: sch.Schema('Thread'), value: int=None):
"""Skip the remainder of the current function (return)."""
find_thread_by_obj(thread).switch()
if value is None:
gdb.execute('return')
else:
gdb.execute(f'return {value}')
@REGISTRY.method(action='break_sw_execute')
def break_sw_execute_address(inferior: sch.Schema('Inferior'), address: Address):
"""Set a breakpoint (break)."""
inf = find_inf_by_obj(inferior)
offset = inferior.trace.memory_mapper.map_back(inf, address)
gdb.execute(f'break *0x{offset:x}')
@REGISTRY.method(action='break_sw_execute')
def break_sw_execute_expression(expression: str):
"""Set a breakpoint (break)."""
# TODO: Escape?
gdb.execute(f'break {expression}')
@REGISTRY.method(action='break_hw_execute')
def break_hw_execute_address(inferior: sch.Schema('Inferior'), address: Address):
"""Set a hardware-assisted breakpoint (hbreak)."""
inf = find_inf_by_obj(inferior)
offset = inferior.trace.memory_mapper.map_back(inf, address)
gdb.execute(f'hbreak *0x{offset:x}')
@REGISTRY.method(action='break_hw_execute')
def break_hw_execute_expression(expression: str):
"""Set a hardware-assisted breakpoint (hbreak)."""
# TODO: Escape?
gdb.execute(f'hbreak {expression}')
@REGISTRY.method(action='break_read')
def break_read_range(inferior: sch.Schema('Inferior'), range: AddressRange):
"""Set a read watchpoint (rwatch)."""
inf = find_inf_by_obj(inferior)
offset_start = inferior.trace.memory_mapper.map_back(
inf, Address(range.space, range.min))
gdb.execute(
f'rwatch -location *((char(*)[{range.length()}]) 0x{offset_start:x})')
@REGISTRY.method(action='break_read')
def break_read_expression(expression: str):
"""Set a read watchpoint (rwatch)."""
gdb.execute(f'rwatch {expression}')
@REGISTRY.method(action='break_write')
def break_write_range(inferior: sch.Schema('Inferior'), range: AddressRange):
"""Set a watchpoint (watch)."""
inf = find_inf_by_obj(inferior)
offset_start = inferior.trace.memory_mapper.map_back(
inf, Address(range.space, range.min))
gdb.execute(
f'watch -location *((char(*)[{range.length()}]) 0x{offset_start:x})')
@REGISTRY.method(action='break_write')
def break_write_expression(expression: str):
"""Set a watchpoint (watch)."""
gdb.execute(f'watch {expression}')
@REGISTRY.method(action='break_access')
def break_access_range(inferior: sch.Schema('Inferior'), range: AddressRange):
"""Set an access watchpoint (awatch)."""
inf = find_inf_by_obj(inferior)
offset_start = inferior.trace.memory_mapper.map_back(
inf, Address(range.space, range.min))
gdb.execute(
f'awatch -location *((char(*)[{range.length()}]) 0x{offset_start:x})')
@REGISTRY.method(action='break_access')
def break_access_expression(expression: str):
"""Set an access watchpoint (awatch)."""
gdb.execute(f'awatch {expression}')
@REGISTRY.method(action='break_ext')
def break_event(spec: str):
"""Set a catchpoint (catch)."""
gdb.execute(f'catch {spec}')
@REGISTRY.method(action='toggle')
def toggle_breakpoint(breakpoint: sch.Schema('BreakpointSpec'), enabled: bool):
"""Toggle a breakpoint."""
bpt = find_bpt_by_obj(breakpoint)
bpt.enabled = enabled
@REGISTRY.method(action='toggle', condition=util.GDB_VERSION.major >= 13)
def toggle_breakpoint_location(location: sch.Schema('BreakpointLocation'), enabled: bool):
"""Toggle a breakpoint location."""
loc = find_bpt_loc_by_obj(location)
loc.enabled = enabled
@REGISTRY.method(action='toggle', condition=util.GDB_VERSION.major < 13)
def toggle_breakpoint_location(location: sch.Schema('BreakpointLocation'), enabled: bool):
"""Toggle a breakpoint location."""
bptnum, locnum = find_bptlocnum_by_obj(location)
cmd = 'enable' if enabled else 'disable'
gdb.execute(f'{cmd} {bptnum}.{locnum}')
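# NOTE: the two toggle_breakpoint_location variants share a name on purpose;
# the complementary 'condition' arguments (GDB >= 13 vs. < 13) presumably let
# the registry expose only the variant that applies to the running GDB.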
@REGISTRY.method(action='delete')
def delete_breakpoint(breakpoint: sch.Schema('BreakpointSpec')):
"""Delete a breakpoint."""
bpt = find_bpt_by_obj(breakpoint)
bpt.delete()
@REGISTRY.method
def read_mem(inferior: sch.Schema('Inferior'), range: AddressRange):
"""Read memory."""
inf = find_inf_by_obj(inferior)
offset_start = inferior.trace.memory_mapper.map_back(
inf, Address(range.space, range.min))
with commands.open_tracked_tx('Read Memory'):
gdb.execute(f'ghidra trace putmem 0x{offset_start:x} {range.length()}')
@REGISTRY.method
def write_mem(inferior: sch.Schema('Inferior'), address: Address, data: bytes):
"""Write memory."""
inf = find_inf_by_obj(inferior)
offset = inferior.trace.memory_mapper.map_back(inf, address)
inf.write_memory(offset, data)
@REGISTRY.method
def write_reg(frame: sch.Schema('Frame'), name: str, value: bytes):
"""Write a register."""
f = find_frame_by_obj(frame)
f.select()
inf = gdb.selected_inferior()
mname, mval = frame.trace.register_mapper.map_value_back(inf, name, value)
reg = find_reg_by_name(f, mname)
size = int(gdb.parse_and_eval(f'sizeof(${mname})'))
arr = '{' + ','.join(str(b) for b in mval) + '}'
gdb.execute(f'set ((unsigned char[{size}])${mname}) = {arr}')

View File

@@ -0,0 +1,46 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import gdb
# TODO: I don't know how to register a custom parameter prefix. I would rather
# these were 'ghidra language' and 'ghidra compiler'
class GhidraLanguageParameter(gdb.Parameter):
"""
The language id for Ghidra traces. Set this to 'auto' to try to derive it
from 'show arch' and 'show endian'. Otherwise, set it to a Ghidra
LanguageID.
"""
def __init__(self):
super().__init__('ghidra-language', gdb.COMMAND_DATA, gdb.PARAM_STRING)
self.value = 'auto'
GhidraLanguageParameter()
class GhidraCompilerParameter(gdb.Parameter):
"""
The compiler spec id for Ghidra traces. Set this to 'auto' to try to derive
it from 'show osabi'. Otherwise, set it to a Ghidra CompilerSpecID. Note
that valid compiler spec ids depend on the language id.
"""
def __init__(self):
super().__init__('ghidra-compiler', gdb.COMMAND_DATA, gdb.PARAM_STRING)
self.value = 'auto'
GhidraCompilerParameter()
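# Illustration only (not part of the commit): from the GDB CLI these behave
# like ordinary parameters, e.g.
#   (gdb) set ghidra-language x86:LE:64:default
#   (gdb) set ghidra-compiler gcc
#   (gdb) show ghidra-language
# Left at 'auto', compute_ghidra_lcsp() (in the module shown earlier) derives
# both from 'show arch', 'show endian', and 'show osabi'.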

View File

@@ -0,0 +1,413 @@
<context>
<schema name="Session" elementResync="NEVER" attributeResync="NEVER">
<interface name="Access" />
<interface name="Attacher" />
<interface name="Interpreter" />
<interface name="Interruptible" />
<interface name="Launcher" />
<interface name="ActiveScope" />
<interface name="EventScope" />
<interface name="FocusScope" />
<interface name="Aggregate" />
<element schema="VOID" />
<attribute name="Inferiors" schema="InferiorContainer" required="yes" fixed="yes" />
<attribute name="Available" schema="AvailableContainer" required="yes" fixed="yes" />
<attribute name="Breakpoints" schema="BreakpointContainer" required="yes" fixed="yes" />
<attribute name="_accessible" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_supported_attach_kinds" schema="SET_ATTACH_KIND" required="yes" hidden="yes" />
<attribute name="_prompt" schema="STRING" required="yes" hidden="yes" />
<attribute name="_parameters" schema="MAP_PARAMETERS" required="yes" hidden="yes" />
<attribute name="_event_thread" schema="OBJECT" hidden="yes" />
<attribute name="_focus" schema="Selectable" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Selectable" elementResync="NEVER" attributeResync="NEVER">
<element schema="OBJECT" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="BreakpointContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointSpecContainer" />
<element schema="BreakpointSpec" />
<attribute name="_supported_breakpoint_kinds" schema="SET_BREAKPOINT_KIND" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="AvailableContainer" canonical="yes" elementResync="ALWAYS" attributeResync="NEVER">
<interface name="Configurable" />
<element schema="Attachable" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_base" schema="INT" />
<attribute schema="VOID" />
</schema>
<schema name="InferiorContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="Configurable" />
<element schema="Inferior" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_base" schema="INT" />
<attribute schema="VOID" />
</schema>
<schema name="BreakpointSpec" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointSpec" />
<interface name="Deletable" />
<interface name="Togglable" />
<element schema="BreakpointLocation" />
<attribute name="_container" schema="BreakpointContainer" required="yes" hidden="yes" />
<attribute name="_expression" schema="STRING" required="yes" hidden="yes" />
<attribute name="_kinds" schema="SET_BREAKPOINT_KIND" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_enabled" schema="BOOL" required="yes" hidden="yes" />
<attribute name="Commands" schema="STRING" />
<attribute name="Condition" schema="STRING" />
<attribute name="Hit Count" schema="INT" />
<attribute name="Ignore Count" schema="INT" />
<attribute name="Pending" schema="BOOL" />
<attribute name="Silent" schema="BOOL" />
<attribute name="Temporary" schema="BOOL" />
<attribute schema="VOID" />
</schema>
<schema name="Attachable" elementResync="NEVER" attributeResync="NEVER">
<interface name="Attachable" />
<element schema="VOID" />
<attribute name="_pid" schema="LONG" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Inferior" elementResync="NEVER" attributeResync="NEVER">
<interface name="Process" />
<interface name="Aggregate" />
<interface name="ExecutionStateful" />
<interface name="Attacher" />
<interface name="Deletable" />
<interface name="Detachable" />
<interface name="Killable" />
<interface name="Launcher" />
<interface name="Resumable" />
<interface name="Steppable" />
<interface name="Interruptible" />
<element schema="VOID" />
<attribute name="Threads" schema="ThreadContainer" required="yes" fixed="yes" />
<attribute name="Breakpoints" schema="BreakpointLocationContainer" required="yes" fixed="yes" />
<attribute name="_exit_code" schema="LONG" />
<attribute name="Environment" schema="Environment" required="yes" fixed="yes" />
<attribute name="Memory" schema="Memory" required="yes" fixed="yes" />
<attribute name="Modules" schema="ModuleContainer" required="yes" fixed="yes" />
<attribute name="_pid" schema="LONG" hidden="yes" />
<attribute name="_state" schema="EXECUTION_STATE" required="yes" hidden="yes" />
<attribute name="_supported_attach_kinds" schema="SET_ATTACH_KIND" required="yes" hidden="yes" />
<attribute name="_parameters" schema="MAP_PARAMETERS" required="yes" hidden="yes" />
<attribute name="_supported_step_kinds" schema="SET_STEP_KIND" required="yes" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Environment" elementResync="NEVER" attributeResync="NEVER">
<interface name="Environment" />
<element schema="VOID" />
<attribute name="arch" schema="STRING" />
<attribute name="os" schema="STRING" />
<attribute name="endian" schema="STRING" />
<attribute name="_arch" schema="STRING" hidden="yes" />
<attribute name="_debugger" schema="STRING" hidden="yes" />
<attribute name="_os" schema="STRING" hidden="yes" />
<attribute name="_endian" schema="STRING" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="ModuleContainer" canonical="yes" elementResync="ONCE" attributeResync="NEVER">
<interface name="ModuleContainer" />
<element schema="Module" />
<attribute name="_supports_synthetic_modules" schema="BOOL" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Memory" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="Memory" />
<element schema="MemoryRegion" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="BreakpointLocation" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointLocation" />
<element schema="VOID" />
<attribute name="_range" schema="RANGE" hidden="yes" />
<attribute name="_spec" schema="BreakpointSpec" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="BreakpointLocationContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointLocationContainer" />
<element schema="BreakpointLocation" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="ThreadContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="Configurable" />
<element schema="Thread" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_base" schema="INT" />
<attribute schema="VOID" />
</schema>
<schema name="Method" elementResync="NEVER" attributeResync="NEVER">
<interface name="Method" />
<element schema="VOID" />
<attribute name="_display" schema="STRING" required="yes" fixed="yes" hidden="yes" />
<attribute name="_return_type" schema="TYPE" required="yes" fixed="yes" hidden="yes" />
<attribute name="_parameters" schema="MAP_PARAMETERS" required="yes" fixed="yes" hidden="yes" />
<attribute schema="VOID" fixed="yes" hidden="yes" />
</schema>
<schema name="Thread" elementResync="NEVER" attributeResync="NEVER">
<interface name="Thread" />
<interface name="ExecutionStateful" />
<interface name="Steppable" />
<interface name="Aggregate" />
<element schema="VOID" />
<attribute name="Stack" schema="Stack" required="yes" fixed="yes" />
<attribute name="_tid" schema="LONG" hidden="yes" />
<attribute name="_state" schema="EXECUTION_STATE" required="yes" hidden="yes" />
<attribute name="_supported_step_kinds" schema="SET_STEP_KIND" required="yes" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="Advance" schema="Method" required="yes" fixed="yes" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Module" elementResync="NEVER" attributeResync="NEVER">
<interface name="Module" />
<element schema="VOID" />
<attribute name="Sections" schema="SectionContainer" required="yes" fixed="yes" />
<attribute name="Symbols" schema="SymbolContainer" required="yes" fixed="yes" />
<attribute name="range" schema="RANGE" />
<attribute name="module name" schema="STRING" />
<attribute name="_module_name" schema="STRING" required="yes" hidden="yes" />
<attribute name="_range" schema="RANGE" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="MemoryRegion" elementResync="NEVER" attributeResync="NEVER">
<interface name="MemoryRegion" />
<element schema="VOID" />
<attribute name="_offset" schema="LONG" required="yes" fixed="yes" hidden="yes" />
<attribute name="_objfile" schema="STRING" required="yes" fixed="yes" hidden="yes" />
<attribute name="_readable" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_writable" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_executable" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_range" schema="RANGE" required="yes" hidden="yes" />
<attribute name="_memory" schema="Memory" required="yes" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="SectionContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="SectionContainer" />
<element schema="Section" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Stack" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="Stack" />
<element schema="StackFrame" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="SymbolContainer" canonical="yes" elementResync="ONCE" attributeResync="NEVER">
<interface name="SymbolNamespace" />
<element schema="Symbol" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Symbol" elementResync="NEVER" attributeResync="NEVER">
<interface name="Symbol" />
<element schema="VOID" />
<attribute name="_size" schema="LONG" fixed="yes" hidden="yes" />
<attribute name="_namespace" schema="SymbolContainer" required="yes" fixed="yes" hidden="yes" />
<attribute name="_data_type" schema="DATA_TYPE" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ADDRESS" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_bpt" schema="STRING" />
<attribute schema="VOID" />
</schema>
<schema name="StackFrame" elementResync="NEVER" attributeResync="NEVER">
<interface name="StackFrame" />
<interface name="Aggregate" />
<element schema="VOID" />
<attribute name="_function" schema="STRING" hidden="yes" />
<attribute name="Registers" schema="RegisterValueContainer" required="yes" fixed="yes" />
<attribute name="_pc" schema="ADDRESS" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Section" elementResync="NEVER" attributeResync="NEVER">
<interface name="Section" />
<element schema="VOID" />
<attribute name="range" schema="RANGE" />
<attribute name="_module" schema="Module" required="yes" fixed="yes" hidden="yes" />
<attribute name="_range" schema="RANGE" required="yes" fixed="yes" />
<attribute name="_offset" schema="INT" required="no" fixed="yes" />
<attribute name="_objfile" schema="STRING" required="no" fixed="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="RegisterValueContainer" canonical="yes" elementResync="ONCE" attributeResync="NEVER">
<interface name="RegisterContainer" />
<interface name="RegisterBank" />
<element schema="RegisterValue" />
<attribute name="_descriptions" schema="RegisterValueContainer" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="RegisterValue" elementResync="NEVER" attributeResync="NEVER">
<interface name="Register" />
<element schema="VOID" />
<attribute name="_container" schema="OBJECT" required="yes" fixed="yes" hidden="yes" />
<attribute name="_length" schema="INT" required="yes" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
</context>

View File

@ -0,0 +1,286 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from collections import namedtuple
import re
import gdb
GdbVersion = namedtuple('GdbVersion', ['full', 'major', 'minor'])
def _compute_gdb_ver():
blurb = gdb.execute('show version', to_string=True)
top = blurb.split('\n')[0]
full = top.split(' ')[-1]
major, minor = full.split('.')[:2]
return GdbVersion(full, int(major), int(minor))
GDB_VERSION = _compute_gdb_ver()
MODULES_CMD_V8 = 'maintenance info sections ALLOBJ'
MODULES_CMD_V11 = 'maintenance info sections -all-objects'
OBJFILE_PATTERN_V8 = re.compile("\\s*Object file: (?P<name>.*)")
OBJFILE_PATTERN_V11 = re.compile(
"\\s*((Object)|(Exec)) file: `(?P<name>.*)', file type (?P<type>.*)")
OBJFILE_SECTION_PATTERN_V8 = re.compile("\\s*" +
"0x(?P<vmaS>[0-9A-Fa-f]+)\\s*->\\s*" +
"0x(?P<vmaE>[0-9A-Fa-f]+)\\s+at\\s+" +
"0x(?P<offset>[0-9A-Fa-f]+)\\s*:\\s*" +
"(?P<name>\\S+)\\s+" +
"(?P<attrs>.*)")
OBJFILE_SECTION_PATTERN_V9 = re.compile("\\s*" +
"\\[\\s*(?P<idx>\\d+)\\]\\s+" +
"0x(?P<vmaS>[0-9A-Fa-f]+)\\s*->\\s*" +
"0x(?P<vmaE>[0-9A-Fa-f]+)\\s+at\\s+" +
"0x(?P<offset>[0-9A-Fa-f]+)\\s*:\\s*" +
"(?P<name>\\S+)\\s+" +
"(?P<attrs>.*)")
GNU_DEBUGDATA_PREFIX = ".gnu_debugdata for "
class Module(namedtuple('BaseModule', ['name', 'base', 'max', 'sections'])):
pass
class Section(namedtuple('BaseSection', ['name', 'start', 'end', 'offset', 'attrs'])):
def better(self, other):
start = self.start if self.start != 0 else other.start
end = self.end if self.end != 0 else other.end
offset = self.offset if self.offset != 0 else other.offset
attrs = dict.fromkeys(self.attrs)
attrs.update(dict.fromkeys(other.attrs))
return Section(self.name, start, end, offset, list(attrs))
def try_hexint(val, name):
try:
return int(val, 16)
except ValueError:
gdb.write("Invalid {}: {}".format(name, val), stream=gdb.STDERR)
return 0
# AFAICT, Objfile does not give info about load addresses :(
class ModuleInfoReader(object):
def name_from_line(self, line):
mat = self.objfile_pattern.fullmatch(line)
if mat is None:
return None
n = mat['name']
if n.startswith(GNU_DEBUGDATA_PREFIX):
return None
        return n
def section_from_line(self, line):
mat = self.section_pattern.fullmatch(line)
if mat is None:
return None
start = try_hexint(mat['vmaS'], 'section start')
end = try_hexint(mat['vmaE'], 'section end')
offset = try_hexint(mat['offset'], 'section offset')
name = mat['name']
attrs = [a for a in mat['attrs'].split(' ') if a != '']
return Section(name, start, end, offset, attrs)
def finish_module(self, name, sections):
alloc = {k: s for k, s in sections.items() if 'ALLOC' in s.attrs}
if len(alloc) == 0:
return Module(name, 0, 0, alloc)
# TODO: This may not be the module base, depending on headers
base_addr = min(s.start - s.offset for s in alloc.values())
max_addr = max(s.end for s in alloc.values())
return Module(name, base_addr, max_addr, alloc)
def get_modules(self):
modules = {}
out = gdb.execute(self.cmd, to_string=True)
name = None
sections = None
for line in out.split('\n'):
n = self.name_from_line(line)
if n is not None:
if name is not None:
modules[name] = self.finish_module(name, sections)
name = n
sections = {}
continue
if name is None:
# Don't waste time parsing if no module
continue
s = self.section_from_line(line)
if s is not None:
if s.name in sections:
s = s.better(sections[s.name])
sections[s.name] = s
if name is not None:
modules[name] = self.finish_module(name, sections)
return modules
class ModuleInfoReaderV8(ModuleInfoReader):
cmd = MODULES_CMD_V8
objfile_pattern = OBJFILE_PATTERN_V8
section_pattern = OBJFILE_SECTION_PATTERN_V8
class ModuleInfoReaderV9(ModuleInfoReader):
cmd = MODULES_CMD_V8
objfile_pattern = OBJFILE_PATTERN_V8
section_pattern = OBJFILE_SECTION_PATTERN_V9
class ModuleInfoReaderV11(ModuleInfoReader):
cmd = MODULES_CMD_V11
objfile_pattern = OBJFILE_PATTERN_V11
section_pattern = OBJFILE_SECTION_PATTERN_V9
def _choose_module_info_reader():
if GDB_VERSION.major == 8:
return ModuleInfoReaderV8()
elif GDB_VERSION.major == 9:
return ModuleInfoReaderV9()
elif GDB_VERSION.major == 10:
return ModuleInfoReaderV9()
elif GDB_VERSION.major == 11:
return ModuleInfoReaderV11()
elif GDB_VERSION.major == 12:
return ModuleInfoReaderV11()
elif GDB_VERSION.major > 12:
return ModuleInfoReaderV11()
else:
raise gdb.GdbError(
"GDB version not recognized by ghidragdb: " + GDB_VERSION.full)
MODULE_INFO_READER = _choose_module_info_reader()
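# Illustrative use (not part of the plugin's normal flow): with this package
# loaded into GDB, the version-appropriate reader can be exercised from GDB's
# Python prompt, for example:
#     from ghidragdb import util
#     for name, m in util.MODULE_INFO_READER.get_modules().items():
#         print(name, hex(m.base), hex(m.max))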
REGIONS_CMD = 'info proc mappings'
REGION_PATTERN_V8 = re.compile("\\s*" +
"0x(?P<start>[0-9,A-F,a-f]+)\\s+" +
"0x(?P<end>[0-9,A-F,a-f]+)\\s+" +
"0x(?P<size>[0-9,A-F,a-f]+)\\s+" +
"0x(?P<offset>[0-9,A-F,a-f]+)\\s+" +
"(?P<objfile>.*)")
REGION_PATTERN_V12 = re.compile("\\s*" +
"0x(?P<start>[0-9,A-F,a-f]+)\\s+" +
"0x(?P<end>[0-9,A-F,a-f]+)\\s+" +
"0x(?P<size>[0-9,A-F,a-f]+)\\s+" +
"0x(?P<offset>[0-9,A-F,a-f]+)\\s+" +
"(?P<perms>[rwsxp\\-]+)\\s+" +
"(?P<objfile>.*)")
class Region(namedtuple('BaseRegion', ['start', 'end', 'offset', 'perms', 'objfile'])):
pass
class RegionInfoReader(object):
def region_from_line(self, line):
mat = self.region_pattern.fullmatch(line)
if mat is None:
return None
start = try_hexint(mat['start'], 'region start')
end = try_hexint(mat['end'], 'region end')
offset = try_hexint(mat['offset'], 'region offset')
perms = self.get_region_perms(mat)
objfile = mat['objfile']
return Region(start, end, offset, perms, objfile)
def get_regions(self):
regions = []
out = gdb.execute(self.cmd, to_string=True)
for line in out.split('\n'):
r = self.region_from_line(line)
if r is None:
continue
regions.append(r)
return regions
def full_mem(self):
# TODO: This may not work for Harvard architectures
sizeptr = int(gdb.parse_and_eval('sizeof(void*)')) * 8
return Region(0, 1 << sizeptr, 0, None, 'full memory')
class RegionInfoReaderV8(RegionInfoReader):
cmd = REGIONS_CMD
region_pattern = REGION_PATTERN_V8
def get_region_perms(self, mat):
return None
class RegionInfoReaderV12(RegionInfoReader):
cmd = REGIONS_CMD
region_pattern = REGION_PATTERN_V12
def get_region_perms(self, mat):
return mat['perms']
def _choose_region_info_reader():
if 8 <= GDB_VERSION.major < 12:
return RegionInfoReaderV8()
elif GDB_VERSION.major >= 12:
return RegionInfoReaderV12()
else:
raise gdb.GdbError(
"GDB version not recognized by ghidragdb: " + GDB_VERSION.full)
REGION_INFO_READER = _choose_region_info_reader()
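# The region reader wraps 'info proc mappings' the same way: get_regions()
# parses each mapping line into a Region tuple, and full_mem() computes a
# single Region covering the whole pointer-sized address space (presumably as
# a fallback when no mapping information is available).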
BREAK_LOCS_CMD = 'info break {}'
BREAK_PATTERN = re.compile('')
BREAK_LOC_PATTERN = re.compile('')
class BreakpointLocation(namedtuple('BaseBreakpointLocation', ['address', 'enabled', 'thread_groups'])):
pass
class BreakpointLocationInfoReaderV8(object):
def breakpoint_from_line(self, line):
pass
def location_from_line(self, line):
pass
def get_locations(self, breakpoint):
pass
class BreakpointLocationInfoReaderV13(object):
def get_locations(self, breakpoint):
return breakpoint.locations
def _choose_breakpoint_location_info_reader():
if 8 <= GDB_VERSION.major < 13:
return BreakpointLocationInfoReaderV8()
elif GDB_VERSION.major >= 13:
return BreakpointLocationInfoReaderV13()
else:
raise gdb.GdbError(
"GDB version not recognized by ghidragdb: " + GDB_VERSION.full)
BREAKPOINT_LOCATION_INFO_READER = _choose_breakpoint_location_info_reader()

View File

@ -0,0 +1,25 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "ghidragdb"
version = "10.4"
authors = [
{ name="Ghidra Development Team" },
]
description = "Ghidra's Plugin for gdb"
readme = "README.md"
requires-python = ">=3.7"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
dependencies = [
"ghidratrace==10.4",
]
[project.urls]
"Homepage" = "https://github.com/NationalSecurityAgency/ghidra"
"Bug Tracker" = "https://github.com/NationalSecurityAgency/ghidra/issues"

View File

@ -30,49 +30,49 @@ import ghidra.dbg.util.ShellUtils;
public enum GdbLinuxSpecimen implements DebuggerTestSpecimen, DebuggerModelTestUtils {
SLEEP {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expTraceableSleep");
}
},
FORK_EXIT {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expFork");
}
},
CLONE_EXIT {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expCloneExit");
}
},
PRINT {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expPrint");
}
},
REGISTERS {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expRegisters");
}
},
SPIN_STRIPPED {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expSpin.stripped");
}
},
STACK {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expStack");
}
};
abstract String getCommandLine();
public abstract String getCommandLine();
@Override
public DummyProc runDummy() throws Throwable {

View File

@ -20,6 +20,7 @@ apply from: "$rootProject.projectDir/gradle/nativeProject.gradle"
apply from: "$rootProject.projectDir/gradle/distributableGhidraModule.gradle"
apply from: "$rootProject.projectDir/gradle/debugger/hasExecutableJar.gradle"
apply from: "$rootProject.projectDir/gradle/debugger/hasPythonPackage.gradle"
apply plugin: 'eclipse'
eclipse.project.name = 'Debug Debugger-agent-lldb'
@ -33,6 +34,8 @@ dependencies {
testImplementation project(path: ':Framework-AsyncComm', configuration: 'testArtifacts')
testImplementation project(path: ':Framework-Debugging', configuration: 'testArtifacts')
testImplementation project(path: ':Debugger-gadp', configuration: 'testArtifacts')
pypkgInstall project(path: ':Debugger-rmi-trace', configuration: 'pypkgInstall')
}
tasks.nodepJar {

View File

@ -5,7 +5,9 @@
.project||NONE||reviewed||END|
Module.manifest||GHIDRA||||END|
build.gradle||GHIDRA||||END|
data/InstructionsForBuildingLLDBInterface.txt||GHIDRA||||END|
src/llvm-project/lldb/bindings/java/java-typemaps.swig||Apache License 2.0 with LLVM Exceptions||||END|
src/llvm-project/lldb/bindings/java/java.swig||Apache License 2.0 with LLVM Exceptions||||END|
src/llvm-project/lldb/build_script||GHIDRA||||END|
src/main/py/LICENSE||GHIDRA||||END|
src/main/py/README.md||GHIDRA||||END|
src/main/py/ghidralldb/schema.xml||GHIDRA||||END|
src/main/py/pyproject.toml||GHIDRA||||END|

View File

@ -0,0 +1,11 @@
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,3 @@
# Ghidra Trace RMI
Package for connecting LLDB to Ghidra via Trace RMI.
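
A minimal loading sketch (illustrative only; it assumes this package and its
`ghidratrace` dependency are installed for the Python interpreter embedded in
LLDB), after which the `ghidra_trace_*` commands defined in `commands.py`
should be available:

```
(lldb) script import ghidralldb
```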

View File

@ -0,0 +1,16 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from . import util, commands

View File

@ -0,0 +1,261 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from ghidratrace.client import Address, RegVal
import lldb
from . import util
# NOTE: This map is derived from the ldefs using a script
language_map = {
'aarch64': ['AARCH64:BE:64:v8A', 'AARCH64:LE:64:AppleSilicon', 'AARCH64:LE:64:v8A'],
'armv7': ['ARM:BE:32:v7', 'ARM:LE:32:v7'],
'armv7k': ['ARM:BE:32:v7', 'ARM:LE:32:v7'],
'armv7s': ['ARM:BE:32:v7', 'ARM:LE:32:v7'],
'arm64': ['ARM:BE:64:v8', 'ARM:LE:64:v8'],
'arm64_32': ['ARM:BE:32:v8', 'ARM:LE:32:v8'],
'arm64e': ['ARM:BE:64:v8', 'ARM:LE:64:v8'],
'i386': ['x86:LE:32:default'],
'thumbv7': ['ARM:BE:32:v7', 'ARM:LE:32:v7'],
'thumbv7k': ['ARM:BE:32:v7', 'ARM:LE:32:v7'],
'thumbv7s': ['ARM:BE:32:v7', 'ARM:LE:32:v7'],
'x86_64': ['x86:LE:64:default'],
'wasm32': ['x86:LE:64:default'],
}
data64_compiler_map = {
None: 'pointer64',
}
x86_compiler_map = {
'freebsd': 'gcc',
'linux': 'gcc',
'netbsd': 'gcc',
'ps4': 'gcc',
'ios': 'clang',
'macosx': 'clang',
'tvos': 'clang',
'watchos': 'clang',
'windows': 'Visual Studio',
# This may seem wrong, but Ghidra cspecs really describe the ABI
'Cygwin': 'Visual Studio',
}
compiler_map = {
'DATA:BE:64:default': data64_compiler_map,
'DATA:LE:64:default': data64_compiler_map,
'x86:LE:32:default': x86_compiler_map,
'x86:LE:64:default': x86_compiler_map,
}
def get_arch():
triple = util.get_target().triple
if triple is None:
return "x86_64"
return triple.split('-')[0]
def get_endian():
parm = util.get_convenience_variable('endian')
if parm != 'auto':
return parm
# Once again, we have to hack using the human-readable 'show'
order = util.get_target().GetByteOrder()
if order is lldb.eByteOrderLittle:
return 'little'
if order is lldb.eByteOrderBig:
return 'big'
if order is lldb.eByteOrderPDP:
return 'pdp'
return 'unrecognized'
def get_osabi():
parm = util.get_convenience_variable('osabi')
if not parm in ['auto', 'default']:
return parm
# We have to hack around the fact the LLDB won't give us the current OS ABI
# via the API if it is "auto" or "default". Using "show", we can get it, but
# we have to parse output meant for a human. The current value will be on
# the top line, delimited by double quotes. It will be the last delimited
# thing on that line. ("auto" may appear earlier on the line.)
triple = util.get_target().triple
# this is an unfortunate feature of the tests
if triple is None:
return "linux"
return triple.split('-')[2]
def compute_ghidra_language():
# First, check if the parameter is set
lang = util.get_convenience_variable('ghidra-language')
if lang != 'auto':
return lang
# Get the list of possible languages for the arch. We'll need to sift
# through them by endian and probably prefer default/simpler variants. The
# heuristic for "simpler" will be 'default' then shortest variant id.
arch = get_arch()
endian = get_endian()
lebe = ':BE:' if endian == 'big' else ':LE:'
if not arch in language_map:
return 'DATA' + lebe + '64:default'
langs = language_map[arch]
matched_endian = sorted(
(l for l in langs if lebe in l),
key=lambda l: 0 if l.endswith(':default') else len(l)
)
if len(matched_endian) > 0:
return matched_endian[0]
# NOTE: I'm disinclined to fall back to a language match with wrong endian.
return 'DATA' + lebe + '64:default'
def compute_ghidra_compiler(lang):
# First, check if the parameter is set
comp = util.get_convenience_variable('ghidra-compiler')
if comp != 'auto':
return comp
# Check if the selected lang has specific compiler recommendations
if not lang in compiler_map:
return 'default'
comp_map = compiler_map[lang]
osabi = get_osabi()
if osabi in comp_map:
return comp_map[osabi]
if None in comp_map:
return comp_map[None]
return 'default'
def compute_ghidra_lcsp():
lang = compute_ghidra_language()
comp = compute_ghidra_compiler(lang)
return lang, comp
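# Illustrative example (hypothetical target): for an x86_64 Linux process with
# the 'ghidra-language' and 'ghidra-compiler' convenience variables left at
# 'auto', compute_ghidra_lcsp() would yield ('x86:LE:64:default', 'gcc').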
class DefaultMemoryMapper(object):
def __init__(self, defaultSpace):
self.defaultSpace = defaultSpace
def map(self, proc: lldb.SBProcess, offset: int):
space = self.defaultSpace
return self.defaultSpace, Address(space, offset)
def map_back(self, proc: lldb.SBProcess, address: Address) -> int:
if address.space == self.defaultSpace:
return address.offset
raise ValueError(f"Address {address} is not in process {proc.GetProcessID()}")
DEFAULT_MEMORY_MAPPER = DefaultMemoryMapper('ram')
memory_mappers = {}
def compute_memory_mapper(lang):
if not lang in memory_mappers:
return DEFAULT_MEMORY_MAPPER
return memory_mappers[lang]
class DefaultRegisterMapper(object):
def __init__(self, byte_order):
if not byte_order in ['big', 'little']:
raise ValueError("Invalid byte_order: {}".format(byte_order))
self.byte_order = byte_order
self.union_winners = {}
def map_name(self, proc, name):
return name
"""
def convert_value(self, value, type=None):
if type is None:
type = value.dynamic_type.strip_typedefs()
l = type.sizeof
# l - 1 because array() takes the max index, inclusive
# NOTE: Might like to pre-lookup 'unsigned char', but it depends on the
# architecture *at the time of lookup*.
cv = value.cast(lldb.lookup_type('unsigned char').array(l - 1))
rng = range(l)
if self.byte_order == 'little':
rng = reversed(rng)
return bytes(cv[i] for i in rng)
"""
def map_value(self, proc, name, value):
try:
### TODO: this seems half-baked
av = value.to_bytes(8, "big")
        except Exception:
raise ValueError("Cannot convert {}'s value: '{}', type: '{}'"
.format(name, value, value.type))
return RegVal(self.map_name(proc, name), av)
def map_name_back(self, proc, name):
return name
def map_value_back(self, proc, name, value):
return RegVal(self.map_name_back(proc, name), value)
class Intel_x86_64_RegisterMapper(DefaultRegisterMapper):
def __init__(self):
super().__init__('little')
def map_name(self, proc, name):
if name is None:
return 'UNKNOWN'
if name == 'eflags':
return 'rflags'
if name.startswith('zmm'):
# Ghidra only goes up to ymm, right now
return 'ymm' + name[3:]
return super().map_name(proc, name)
def map_value(self, proc, name, value):
rv = super().map_value(proc, name, value)
if rv.name.startswith('ymm') and len(rv.value) > 32:
return RegVal(rv.name, rv.value[-32:])
return rv
def map_name_back(self, proc, name):
        if name == 'rflags':
            return 'eflags'
        return super().map_name_back(proc, name)
DEFAULT_BE_REGISTER_MAPPER = DefaultRegisterMapper('big')
DEFAULT_LE_REGISTER_MAPPER = DefaultRegisterMapper('little')
register_mappers = {
'x86:LE:64:default': Intel_x86_64_RegisterMapper()
}
def compute_register_mapper(lang):
if not lang in register_mappers:
if ':BE:' in lang:
return DEFAULT_BE_REGISTER_MAPPER
if ':LE:' in lang:
return DEFAULT_LE_REGISTER_MAPPER
return register_mappers[lang]
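# For example, compute_register_mapper('x86:LE:64:default') returns the
# Intel_x86_64_RegisterMapper above, which renames 'eflags' to 'rflags' and
# folds zmm registers down to the ymm forms Ghidra currently models; any other
# language id falls back to a plain byte-order mapper.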

File diff suppressed because it is too large

View File

@ -0,0 +1,709 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import time
import threading
import lldb
from . import commands, util
ALL_EVENTS = 0xFFFF
class HookState(object):
__slots__ = ('installed', 'mem_catchpoint')
def __init__(self):
self.installed = False
self.mem_catchpoint = None
class ProcessState(object):
__slots__ = ('first', 'regions', 'modules', 'threads', 'breaks', 'watches', 'visited')
def __init__(self):
self.first = True
# For things we can detect changes to between stops
self.regions = False
self.modules = False
self.threads = False
self.breaks = False
self.watches = False
# For frames and threads that have already been synced since last stop
self.visited = set()
def record(self, description=None):
first = self.first
self.first = False
if description is not None:
commands.STATE.trace.snapshot(description)
if first:
commands.put_processes()
commands.put_environment()
if self.threads:
commands.put_threads()
self.threads = False
thread = util.selected_thread()
if thread is not None:
if first or thread.GetThreadID() not in self.visited:
commands.put_frames()
self.visited.add(thread.GetThreadID())
frame = util.selected_frame()
hashable_frame = (thread.GetThreadID(), frame.GetFrameID())
if first or hashable_frame not in self.visited:
banks = frame.GetRegisters()
commands.putreg(frame, banks.GetFirstValueByName(commands.DEFAULT_REGISTER_BANK))
commands.putmem("$pc", "1", from_tty=False)
commands.putmem("$sp", "1", from_tty=False)
self.visited.add(hashable_frame)
if first or self.regions or self.threads or self.modules:
# Sections, memory syscalls, or stack allocations
commands.put_regions()
self.regions = False
if first or self.modules:
commands.put_modules()
self.modules = False
if first or self.breaks:
commands.put_breakpoints()
self.breaks = False
if first or self.watches:
commands.put_watchpoints()
self.watches = False
def record_continued(self):
commands.put_processes()
commands.put_threads()
def record_exited(self, exit_code):
proc = util.get_process()
ipath = commands.PROCESS_PATTERN.format(procnum=proc.GetProcessID())
commands.STATE.trace.proxy_object_path(
ipath).set_value('_exit_code', exit_code)
class BrkState(object):
__slots__ = ('break_loc_counts',)
def __init__(self):
self.break_loc_counts = {}
def update_brkloc_count(self, b, count):
self.break_loc_counts[b.GetID()] = count
def get_brkloc_count(self, b):
return self.break_loc_counts.get(b.GetID(), 0)
    def del_brkloc_count(self, b):
        if b.GetID() not in self.break_loc_counts:
            return 0  # TODO: Print a warning?
        count = self.break_loc_counts[b.GetID()]
        del self.break_loc_counts[b.GetID()]
        return count
HOOK_STATE = HookState()
BRK_STATE = BrkState()
PROC_STATE = {}
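# Module-level singletons: HOOK_STATE records whether the event thread has been
# installed, BRK_STATE remembers per-breakpoint location counts so stale
# location entries can be pruned from the trace, and PROC_STATE maps pid to
# ProcessState for each process currently being recorded.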
def process_event(self, listener, event):
try:
desc = util.get_description(event)
#event_process = lldb.SBProcess_GetProcessFromEvent(event)
event_process = util.get_process()
        if event_process.GetProcessID() not in PROC_STATE:
PROC_STATE[event_process.GetProcessID()] = ProcessState()
rc = event_process.GetBroadcaster().AddListener(listener, ALL_EVENTS)
if rc is False:
print("add listener for process failed")
event_thread = lldb.SBThread_GetThreadFromEvent(event)
commands.put_state(event_process)
type = event.GetType()
if lldb.SBTarget.EventIsTargetEvent(event):
print('Event:', desc)
if (type & lldb.SBTarget.eBroadcastBitBreakpointChanged) != 0:
print("eBroadcastBitBreakpointChanged")
return on_breakpoint_modified(event)
if (type & lldb.SBTarget.eBroadcastBitWatchpointChanged) != 0:
print("eBroadcastBitWatchpointChanged")
return on_watchpoint_modified(event)
if (type & lldb.SBTarget.eBroadcastBitModulesLoaded) != 0:
print("eBroadcastBitModulesLoaded")
return on_new_objfile(event)
if (type & lldb.SBTarget.eBroadcastBitModulesUnloaded) != 0:
print("eBroadcastBitModulesUnloaded")
return on_free_objfile(event)
if (type & lldb.SBTarget.eBroadcastBitSymbolsLoaded) != 0:
print("eBroadcastBitSymbolsLoaded")
return True
if lldb.SBProcess.EventIsProcessEvent(event):
if (type & lldb.SBProcess.eBroadcastBitStateChanged) != 0:
print("eBroadcastBitStateChanged")
if not event_process.is_alive:
return on_exited(event)
if event_process.is_stopped:
return on_stop(event)
return True
if (type & lldb.SBProcess.eBroadcastBitInterrupt) != 0:
print("eBroadcastBitInterrupt")
if event_process.is_stopped:
return on_stop(event)
if (type & lldb.SBProcess.eBroadcastBitSTDOUT) != 0:
return True
if (type & lldb.SBProcess.eBroadcastBitSTDERR) != 0:
return True
if (type & lldb.SBProcess.eBroadcastBitProfileData) != 0:
print("eBroadcastBitProfileData")
return True
if (type & lldb.SBProcess.eBroadcastBitStructuredData) != 0:
print("eBroadcastBitStructuredData")
return True
        # NB: Thread events not currently processed
if lldb.SBThread.EventIsThreadEvent(event):
print('Event:', desc)
if (type & lldb.SBThread.eBroadcastBitStackChanged) != 0:
print("eBroadcastBitStackChanged")
return on_frame_selected()
if (type & lldb.SBThread.eBroadcastBitThreadSuspended) != 0:
print("eBroadcastBitThreadSuspended")
if event_process.is_stopped:
return on_stop(event)
if (type & lldb.SBThread.eBroadcastBitThreadResumed) != 0:
print("eBroadcastBitThreadResumed")
return on_cont(event)
if (type & lldb.SBThread.eBroadcastBitSelectedFrameChanged) != 0:
print("eBroadcastBitSelectedFrameChanged")
return on_frame_selected()
if (type & lldb.SBThread.eBroadcastBitThreadSelected) != 0:
print("eBroadcastBitThreadSelected")
return on_thread_selected()
if lldb.SBBreakpoint.EventIsBreakpointEvent(event):
print('Event:', desc)
            btype = lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event)
            bpt = lldb.SBBreakpoint.GetBreakpointFromEvent(event)
if btype is lldb.eBreakpointEventTypeAdded:
print("eBreakpointEventTypeAdded")
return on_breakpoint_created(bpt)
if btype is lldb.eBreakpointEventTypeAutoContinueChanged:
print("elldb.BreakpointEventTypeAutoContinueChanged")
return on_breakpoint_modified(bpt)
if btype is lldb.eBreakpointEventTypeCommandChanged:
print("eBreakpointEventTypeCommandChanged")
return on_breakpoint_modified(bpt)
if btype is lldb.eBreakpointEventTypeConditionChanged:
print("eBreakpointEventTypeConditionChanged")
return on_breakpoint_modified(bpt)
if btype is lldb.eBreakpointEventTypeDisabled:
print("eBreakpointEventTypeDisabled")
return on_breakpoint_modified(bpt)
if btype is lldb.eBreakpointEventTypeEnabled:
print("eBreakpointEventTypeEnabled")
return on_breakpoint_modified(bpt)
if btype is lldb.eBreakpointEventTypeIgnoreChanged:
print("eBreakpointEventTypeIgnoreChanged")
return True
if btype is lldb.eBreakpointEventTypeInvalidType:
print("eBreakpointEventTypeInvalidType")
return True
if btype is lldb.eBreakpointEventTypeLocationsAdded:
print("eBreakpointEventTypeLocationsAdded")
return on_breakpoint_modified(bpt)
if btype is lldb.eBreakpointEventTypeLocationsRemoved:
print("eBreakpointEventTypeLocationsRemoved")
return on_breakpoint_modified(bpt)
if btype is lldb.eBreakpointEventTypeLocationsResolved:
print("eBreakpointEventTypeLocationsResolved")
return on_breakpoint_modified(bpt)
if btype is lldb.eBreakpointEventTypeRemoved:
print("eBreakpointEventTypeRemoved")
return on_breakpoint_deleted(bpt)
if btype is lldb.eBreakpointEventTypeThreadChanged:
print("eBreakpointEventTypeThreadChanged")
return on_breakpoint_modified(bpt)
print("UNKNOWN BREAKPOINT EVENT")
return True
if lldb.SBWatchpoint.EventIsWatchpointEvent(event):
print('Event:', desc)
            btype = lldb.SBWatchpoint.GetWatchpointEventTypeFromEvent(event)
            bpt = lldb.SBWatchpoint.GetWatchpointFromEvent(event)
if btype is lldb.eWatchpointEventTypeAdded:
print("eWatchpointEventTypeAdded")
return on_watchpoint_added(bpt)
if btype is lldb.eWatchpointEventTypeCommandChanged:
print("eWatchpointEventTypeCommandChanged")
return on_watchpoint_modified(bpt)
if btype is lldb.eWatchpointEventTypeConditionChanged:
print("eWatchpointEventTypeConditionChanged")
return on_watchpoint_modified(bpt)
if btype is lldb.eWatchpointEventTypeDisabled:
print("eWatchpointEventTypeDisabled")
return on_watchpoint_modified(bpt)
if btype is lldb.eWatchpointEventTypeEnabled:
print("eWatchpointEventTypeEnabled")
return on_watchpoint_modified(bpt)
if btype is lldb.eWatchpointEventTypeIgnoreChanged:
print("eWatchpointEventTypeIgnoreChanged")
return True
if btype is lldb.eWatchpointEventTypeInvalidType:
print("eWatchpointEventTypeInvalidType")
return True
if btype is lldb.eWatchpointEventTypeRemoved:
print("eWatchpointEventTypeRemoved")
return on_watchpoint_deleted(bpt)
if btype is lldb.eWatchpointEventTypeThreadChanged:
print("eWatchpointEventTypeThreadChanged")
return on_watchpoint_modified(bpt)
if btype is lldb.eWatchpointEventTypeTypeChanged:
print("eWatchpointEventTypeTypeChanged")
return on_watchpoint_modified(bpt)
print("UNKNOWN WATCHPOINT EVENT")
return True
if lldb.SBCommandInterpreter.EventIsCommandInterpreterEvent(event):
print('Event:', desc)
if (type & lldb.SBCommandInterpreter.eBroadcastBitAsynchronousErrorData) != 0:
print("eBroadcastBitAsynchronousErrorData")
return True
if (type & lldb.SBCommandInterpreter.eBroadcastBitAsynchronousOutputData) != 0:
print("eBroadcastBitAsynchronousOutputData")
return True
if (type & lldb.SBCommandInterpreter.eBroadcastBitQuitCommandReceived) != 0:
print("eBroadcastBitQuitCommandReceived")
return True
if (type & lldb.SBCommandInterpreter.eBroadcastBitResetPrompt) != 0:
print("eBroadcastBitResetPrompt")
return True
if (type & lldb.SBCommandInterpreter.eBroadcastBitThreadShouldExit) != 0:
print("eBroadcastBitThreadShouldExit")
return True
print("UNKNOWN EVENT")
return True
except RuntimeError as e:
print(e)
class EventThread(threading.Thread):
func = process_event
event = lldb.SBEvent()
def run(self):
        # Wait for and dispatch events until the target process exits; the
        # WaitForEvent timeout is effectively infinite (UINT32_MAX seconds).
listener = lldb.SBListener('eventlistener')
cli = util.get_debugger().GetCommandInterpreter()
target = util.get_target()
proc = util.get_process()
rc = cli.GetBroadcaster().AddListener(listener, ALL_EVENTS)
if rc is False:
print("add listener for cli failed")
return
rc = target.GetBroadcaster().AddListener(listener, ALL_EVENTS)
if rc is False:
print("add listener for target failed")
return
rc = proc.GetBroadcaster().AddListener(listener, ALL_EVENTS)
if rc is False:
print("add listener for process failed")
return
# Not sure what effect this logic has
rc = cli.GetBroadcaster().AddInitialEventsToListener(listener, ALL_EVENTS)
if rc is False:
print("add listener for cli failed")
return
rc = target.GetBroadcaster().AddInitialEventsToListener(listener, ALL_EVENTS)
if rc is False:
print("add listener for target failed")
return
rc = proc.GetBroadcaster().AddInitialEventsToListener(listener, ALL_EVENTS)
if rc is False:
print("add listener for process failed")
return
rc = listener.StartListeningForEventClass(util.get_debugger(), lldb.SBThread.GetBroadcasterClassName(), ALL_EVENTS)
if rc is False:
print("add listener for threads failed")
return
# THIS WILL NOT WORK: listener = util.get_debugger().GetListener()
while True:
event_recvd = False
while event_recvd is False:
if listener.WaitForEvent(lldb.UINT32_MAX, self.event):
try:
self.func(listener, self.event)
while listener.GetNextEvent(self.event):
self.func(listener, self.event)
event_recvd = True
except Exception as e:
print(e)
proc = util.get_process()
if proc is not None and not proc.is_alive:
break
return
"""
# Not sure if this is possible in LLDB...
# Respond to user-driven state changes: (Not target-driven)
lldb.events.memory_changed.connect(on_memory_changed)
lldb.events.register_changed.connect(on_register_changed)
# Respond to target-driven memory map changes:
# group:memory is actually a bit broad, but will probably port better
# One alternative is to name all syscalls that cause a change....
# Ones we could probably omit:
# msync,
# (Deals in syncing file-backed pages to disk.)
# mlock, munlock, mlockall, munlockall, mincore, madvise,
# (Deal in paging. Doesn't affect valid addresses.)
# mbind, get_mempolicy, set_mempolicy, migrate_pages, move_pages
# (All NUMA stuff)
#
if HOOK_STATE.mem_catchpoint is not None:
HOOK_STATE.mem_catchpoint.enabled = True
else:
breaks_before = set(lldb.breakpoints())
lldb.execute(
catch syscall group:memory
commands
silent
ghidra-hook event-memory
cont
end
)
HOOK_STATE.mem_catchpoint = (
set(lldb.breakpoints()) - breaks_before).pop()
"""
def on_new_process(event):
trace = commands.STATE.trace
if trace is None:
return
with commands.STATE.client.batch():
with trace.open_tx("New Process {}".format(event.process.num)):
commands.put_processes() # TODO: Could put just the one....
def on_process_selected():
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
with commands.STATE.client.batch():
with trace.open_tx("Process {} selected".format(proc.GetProcessID())):
PROC_STATE[proc.GetProcessID()].record()
commands.activate()
def on_process_deleted(event):
trace = commands.STATE.trace
if trace is None:
return
if event.process.num in PROC_STATE:
del PROC_STATE[event.process.num]
with commands.STATE.client.batch():
with trace.open_tx("Process {} deleted".format(event.process.num)):
commands.put_processes() # TODO: Could just delete the one....
def on_new_thread(event):
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
PROC_STATE[proc.GetProcessID()].threads = True
# TODO: Syscall clone/exit to detect thread destruction?
def on_thread_selected():
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
t = util.selected_thread()
with commands.STATE.client.batch():
with trace.open_tx("Thread {}.{} selected".format(proc.GetProcessID(), t.GetThreadID())):
PROC_STATE[proc.GetProcessID()].record()
commands.put_threads()
commands.activate()
def on_frame_selected():
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
f = util.selected_frame()
t = f.GetThread()
with commands.STATE.client.batch():
with trace.open_tx("Frame {}.{}.{} selected".format(proc.GetProcessID(), t.GetThreadID(), f.GetFrameID())):
PROC_STATE[proc.GetProcessID()].record()
commands.put_threads()
commands.put_frames()
commands.activate()
def on_syscall_memory():
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
PROC_STATE[proc.GetProcessID()].regions = True
def on_memory_changed(event):
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
with commands.STATE.client.batch():
with trace.open_tx("Memory *0x{:08x} changed".format(event.address)):
commands.put_bytes(event.address, event.address + event.length,
pages=False, is_mi=False, from_tty=False)
def on_register_changed(event):
print("Register changed: {}".format(dir(event)))
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
# I'd rather have a descriptor!
# TODO: How do I get the descriptor from the number?
# For now, just record the lot
with commands.STATE.client.batch():
with trace.open_tx("Register {} changed".format(event.regnum)):
banks = event.frame.GetRegisters()
commands.putreg(
event.frame, banks.GetFirstValueByName(commands.DEFAULT_REGISTER_BANK))
def on_cont(event):
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
state = PROC_STATE[proc.GetProcessID()]
with commands.STATE.client.batch():
with trace.open_tx("Continued"):
state.record_continued()
def on_stop(event):
proc = lldb.SBProcess.GetProcessFromEvent(event)
if proc.GetProcessID() not in PROC_STATE:
print("not in state")
return
trace = commands.STATE.trace
if trace is None:
print("no trace")
return
state = PROC_STATE[proc.GetProcessID()]
state.visited.clear()
with commands.STATE.client.batch():
with trace.open_tx("Stopped"):
state.record("Stopped")
commands.put_event_thread()
commands.put_threads()
commands.put_frames()
commands.activate()
def on_exited(event):
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
state = PROC_STATE[proc.GetProcessID()]
state.visited.clear()
exit_code = proc.GetExitStatus()
description = "Exited with code {}".format(exit_code)
with commands.STATE.client.batch():
with trace.open_tx(description):
state.record(description)
state.record_exited(exit_code)
commands.put_event_thread()
commands.activate()
def notify_others_breaks(proc):
for num, state in PROC_STATE.items():
if num != proc.GetProcessID():
state.breaks = True
def notify_others_watches(proc):
for num, state in PROC_STATE.items():
if num != proc.GetProcessID():
state.watches = True
def modules_changed():
# Assumption: affects the current process
proc = util.get_process()
if proc.GetProcessID() not in PROC_STATE:
return
PROC_STATE[proc.GetProcessID()].modules = True
def on_new_objfile(event):
modules_changed()
def on_free_objfile(event):
modules_changed()
def on_breakpoint_created(b):
proc = util.get_process()
notify_others_breaks(proc)
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
ibpath = commands.PROC_BREAKS_PATTERN.format(procnum=proc.GetProcessID())
with commands.STATE.client.batch():
with trace.open_tx("Breakpoint {} created".format(b.GetID())):
ibobj = trace.create_object(ibpath)
# Do not use retain_values or it'll remove other locs
commands.put_single_breakpoint(b, ibobj, proc, [])
ibobj.insert()
def on_breakpoint_modified(b):
proc = util.get_process()
notify_others_breaks(proc)
if proc.GetProcessID() not in PROC_STATE:
return
old_count = BRK_STATE.get_brkloc_count(b)
trace = commands.STATE.trace
if trace is None:
return
ibpath = commands.PROC_BREAKS_PATTERN.format(procnum=proc.GetProcessID())
with commands.STATE.client.batch():
with trace.open_tx("Breakpoint {} modified".format(b.GetID())):
ibobj = trace.create_object(ibpath)
commands.put_single_breakpoint(b, ibobj, proc, [])
new_count = BRK_STATE.get_brkloc_count(b)
# NOTE: Location may not apply to process, but whatever.
for i in range(new_count, old_count):
ikey = commands.PROC_BREAK_KEY_PATTERN.format(
breaknum=b.GetID(), locnum=i+1)
ibobj.set_value(ikey, None)
def on_breakpoint_deleted(b):
proc = util.get_process()
notify_others_breaks(proc)
if proc.GetProcessID() not in PROC_STATE:
return
    old_count = BRK_STATE.del_brkloc_count(b)
trace = commands.STATE.trace
if trace is None:
return
bpath = commands.BREAKPOINT_PATTERN.format(breaknum=b.GetID())
ibobj = trace.proxy_object_path(
commands.PROC_BREAKS_PATTERN.format(procnum=proc.GetProcessID()))
with commands.STATE.client.batch():
with trace.open_tx("Breakpoint {} deleted".format(b.GetID())):
trace.proxy_object_path(bpath).remove(tree=True)
for i in range(old_count):
ikey = commands.PROC_BREAK_KEY_PATTERN.format(
breaknum=b.GetID(), locnum=i+1)
ibobj.set_value(ikey, None)
def on_watchpoint_created(b):
proc = util.get_process()
notify_others_watches(proc)
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
ibpath = commands.PROC_WATCHES_PATTERN.format(procnum=proc.GetProcessID())
with commands.STATE.client.batch():
with trace.open_tx("Breakpoint {} created".format(b.GetID())):
ibobj = trace.create_object(ibpath)
# Do not use retain_values or it'll remove other locs
commands.put_single_watchpoint(b, ibobj, proc, [])
ibobj.insert()
def on_watchpoint_modified(b):
proc = util.get_process()
notify_others_watches(proc)
if proc.GetProcessID() not in PROC_STATE:
return
old_count = BRK_STATE.get_brkloc_count(b)
trace = commands.STATE.trace
if trace is None:
return
ibpath = commands.PROC_WATCHES_PATTERN.format(procnum=proc.GetProcessID())
with commands.STATE.client.batch():
with trace.open_tx("Watchpoint {} modified".format(b.GetID())):
ibobj = trace.create_object(ibpath)
commands.put_single_watchpoint(b, ibobj, proc, [])
def on_watchpoint_deleted(b):
proc = util.get_process()
notify_others_watches(proc)
if proc.GetProcessID() not in PROC_STATE:
return
trace = commands.STATE.trace
if trace is None:
return
bpath = commands.WATCHPOINT_PATTERN.format(watchnum=b.GetID())
ibobj = trace.proxy_object_path(
commands.PROC_WATCHES_PATTERN.format(procnum=proc.GetProcessID()))
with commands.STATE.client.batch():
with trace.open_tx("Watchpoint {} deleted".format(b.GetID())):
trace.proxy_object_path(bpath).remove(tree=True)
def install_hooks():
if HOOK_STATE.installed:
return
HOOK_STATE.installed = True
event_thread = EventThread()
event_thread.start()
def remove_hooks():
if not HOOK_STATE.installed:
return
HOOK_STATE.installed = False
def enable_current_process():
proc = util.get_process()
PROC_STATE[proc.GetProcessID()] = ProcessState()
def disable_current_process():
proc = util.get_process()
if proc.GetProcessID() in PROC_STATE:
# Silently ignore already disabled
del PROC_STATE[proc.GetProcessID()]
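# Typical lifecycle, as suggested by the functions above: the commands module
# presumably calls install_hooks() and enable_current_process() once a trace is
# started, after which the EventThread pushes target state into the trace on
# each stop; disable_current_process() and remove_hooks() undo that when
# tracing ends.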

View File

@ -0,0 +1,640 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from concurrent.futures import Future, ThreadPoolExecutor
import re
from ghidratrace import sch
from ghidratrace.client import MethodRegistry, ParamDesc, Address, AddressRange
import lldb
from . import commands, util
REGISTRY = MethodRegistry(ThreadPoolExecutor(max_workers=1))
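# Functions decorated with @REGISTRY.method below are (presumably) exposed to
# the Ghidra front end over Trace RMI; the single-worker executor keeps their
# invocations serialized with respect to the lldb API.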
def extre(base, ext):
return re.compile(base.pattern + ext)
AVAILABLE_PATTERN = re.compile('Available\[(?P<pid>\\d*)\]')
WATCHPOINT_PATTERN = re.compile('Watchpoints\[(?P<watchnum>\\d*)\]')
BREAKPOINT_PATTERN = re.compile('Breakpoints\[(?P<breaknum>\\d*)\]')
BREAK_LOC_PATTERN = extre(BREAKPOINT_PATTERN, '\[(?P<locnum>\\d*)\]')
PROCESS_PATTERN = re.compile('Processes\[(?P<procnum>\\d*)\]')
PROC_BREAKS_PATTERN = extre(PROCESS_PATTERN, '\.Breakpoints')
PROC_WATCHES_PATTERN = extre(PROCESS_PATTERN, '\.Watchpoints')
PROC_WATCHLOC_PATTERN = extre(PROC_WATCHES_PATTERN, '\[(?P<watchnum>\\d*)\]')
ENV_PATTERN = extre(PROCESS_PATTERN, '\.Environment')
THREADS_PATTERN = extre(PROCESS_PATTERN, '\.Threads')
THREAD_PATTERN = extre(THREADS_PATTERN, '\[(?P<tnum>\\d*)\]')
STACK_PATTERN = extre(THREAD_PATTERN, '\.Stack')
FRAME_PATTERN = extre(STACK_PATTERN, '\[(?P<level>\\d*)\]')
REGS_PATTERN = extre(FRAME_PATTERN, '\.Registers')
MEMORY_PATTERN = extre(PROCESS_PATTERN, '\.Memory')
MODULES_PATTERN = extre(PROCESS_PATTERN, '\.Modules')
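# These patterns map trace object paths back onto lldb objects. For example,
# the path 'Processes[1].Threads[2].Stack[0].Registers' fullmatches
# REGS_PATTERN, from which the finders below recover the process, thread, and
# frame (procnum=1, tnum=2, level=0).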
def find_availpid_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
pid = int(mat['pid'])
return pid
def find_availpid_by_obj(object):
return find_availpid_by_pattern(AVAILABLE_PATTERN, object, "an Available")
def find_proc_by_num(procnum):
return util.get_process()
def find_proc_by_pattern(object, pattern, err_msg):
print(object.path)
mat = pattern.fullmatch(object.path)
print(mat)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
procnum = int(mat['procnum'])
return find_proc_by_num(procnum)
def find_proc_by_obj(object):
    return find_proc_by_pattern(object, PROCESS_PATTERN, "a Process")
def find_proc_by_procbreak_obj(object):
return find_proc_by_pattern(object, PROC_BREAKS_PATTERN,
"a BreakpointLocationContainer")
def find_proc_by_procwatch_obj(object):
return find_proc_by_pattern(object, PROC_WATCHES_PATTERN,
"a WatchpointContainer")
def find_proc_by_env_obj(object):
return find_proc_by_pattern(object, ENV_PATTERN, "an Environment")
def find_proc_by_threads_obj(object):
return find_proc_by_pattern(object, THREADS_PATTERN, "a ThreadContainer")
def find_proc_by_mem_obj(object):
return find_proc_by_pattern(object, MEMORY_PATTERN, "a Memory")
def find_proc_by_modules_obj(object):
return find_proc_by_pattern(object, MODULES_PATTERN, "a ModuleContainer")
def find_thread_by_num(proc, tnum):
for t in proc.threads:
if t.GetThreadID() == tnum:
return t
raise KeyError(f"Processes[{proc.GetProcessID()}].Threads[{tnum}] does not exist")
def find_thread_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
procnum = int(mat['procnum'])
tnum = int(mat['tnum'])
proc = find_proc_by_num(procnum)
return find_thread_by_num(proc, tnum)
def find_thread_by_obj(object):
return find_thread_by_pattern(THREAD_PATTERN, object, "a Thread")
def find_thread_by_stack_obj(object):
return find_thread_by_pattern(STACK_PATTERN, object, "a Stack")
def find_frame_by_level(thread, level):
return thread.GetFrameAtIndex(level)
def find_frame_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
procnum = int(mat['procnum'])
tnum = int(mat['tnum'])
level = int(mat['level'])
proc = find_proc_by_num(procnum)
t = find_thread_by_num(proc, tnum)
return find_frame_by_level(t, level)
def find_frame_by_obj(object):
return find_frame_by_pattern(FRAME_PATTERN, object, "a StackFrame")
def find_frame_by_regs_obj(object):
return find_frame_by_pattern(REGS_PATTERN, object,
"a RegisterValueContainer")
# Because there's no method to get a register by name....
def find_reg_by_name(f, name):
for reg in f.architecture().registers():
if reg.name == name:
return reg
raise KeyError(f"No such register: {name}")
# Oof. no lldb/Python method to get breakpoint by number
# I could keep my own cache in a dict, but why?
def find_bpt_by_number(breaknum):
# TODO: If len exceeds some threshold, use binary search?
for i in range(0,util.get_target().GetNumBreakpoints()):
b = util.get_target().GetBreakpointAtIndex(i)
if b.GetID() == breaknum:
return b
raise KeyError(f"Breakpoints[{breaknum}] does not exist")
def find_bpt_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
breaknum = int(mat['breaknum'])
return find_bpt_by_number(breaknum)
def find_bpt_by_obj(object):
return find_bpt_by_pattern(BREAKPOINT_PATTERN, object, "a BreakpointSpec")
# Oof. no lldb/Python method to get breakpoint by number
# I could keep my own cache in a dict, but why?
def find_wpt_by_number(watchnum):
# TODO: If len exceeds some threshold, use binary search?
for i in range(0,util.get_target().GetNumWatchpoints()):
w = util.get_target().GetWatchpointAtIndex(i)
if w.GetID() == watchnum:
return w
raise KeyError(f"Watchpoints[{watchnum}] does not exist")
def find_wpt_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
raise TypeError(f"{object} is not {err_msg}")
watchnum = int(mat['watchnum'])
return find_wpt_by_number(watchnum)
def find_wpt_by_obj(object):
return find_wpt_by_pattern(PROC_WATCHLOC_PATTERN, object, "a WatchpointSpec")
def find_bptlocnum_by_pattern(pattern, object, err_msg):
mat = pattern.fullmatch(object.path)
if mat is None:
        raise TypeError(f"{object} is not {err_msg}")
breaknum = int(mat['breaknum'])
locnum = int(mat['locnum'])
return breaknum, locnum
def find_bptlocnum_by_obj(object):
return find_bptlocnum_by_pattern(BREAK_LOC_PATTERN, object,
"a BreakpointLocation")
def find_bpt_loc_by_obj(object):
breaknum, locnum = find_bptlocnum_by_obj(object)
bpt = find_bpt_by_number(breaknum)
# Requires lldb-13.1 or later
return bpt.locations[locnum - 1] # Display is 1-up
@REGISTRY.method
def execute(cmd: str, to_string: bool=False):
"""Execute a CLI command."""
res = lldb.SBCommandReturnObject()
util.get_debugger().GetCommandInterpreter().HandleCommand(cmd, res)
if to_string:
if res.Succeeded():
return res.GetOutput()
else:
return res.GetError()
@REGISTRY.method(action='refresh')
def refresh_available(node: sch.Schema('AvailableContainer')):
"""List processes on lldb's host system."""
with commands.open_tracked_tx('Refresh Available'):
util.get_debugger().HandleCommand('ghidra_trace_put_available')
@REGISTRY.method(action='refresh')
def refresh_breakpoints(node: sch.Schema('BreakpointContainer')):
"""
Refresh the list of breakpoints (including locations for the current
process).
"""
with commands.open_tracked_tx('Refresh Breakpoints'):
util.get_debugger().HandleCommand('ghidra_trace_put_breakpoints')
@REGISTRY.method(action='refresh')
def refresh_processes(node: sch.Schema('ProcessContainer')):
"""Refresh the list of processes."""
with commands.open_tracked_tx('Refresh Processes'):
util.get_debugger().HandleCommand('ghidra_trace_put_threads')
@REGISTRY.method(action='refresh')
def refresh_proc_breakpoints(node: sch.Schema('BreakpointLocationContainer')):
"""
Refresh the breakpoint locations for the process.
In the course of refreshing the locations, the breakpoint list will also be
refreshed.
"""
with commands.open_tracked_tx('Refresh Breakpoint Locations'):
util.get_debugger().HandleCommand('ghidra_trace_put_breakpoints');
@REGISTRY.method(action='refresh')
def refresh_proc_watchpoints(node: sch.Schema('WatchpointContainer')):
"""
Refresh the watchpoint locations for the process.
In the course of refreshing the locations, the watchpoint list will also be
refreshed.
"""
with commands.open_tracked_tx('Refresh Watchpoint Locations'):
util.get_debugger().HandleCommand('ghidra_trace_put_watchpoints');
@REGISTRY.method(action='refresh')
def refresh_environment(node: sch.Schema('Environment')):
"""Refresh the environment descriptors (arch, os, endian)."""
with commands.open_tracked_tx('Refresh Environment'):
util.get_debugger().HandleCommand('ghidra_trace_put_environment')
@REGISTRY.method(action='refresh')
def refresh_threads(node: sch.Schema('ThreadContainer')):
"""Refresh the list of threads in the process."""
with commands.open_tracked_tx('Refresh Threads'):
util.get_debugger().HandleCommand('ghidra_trace_put_threads')
@REGISTRY.method(action='refresh')
def refresh_stack(node: sch.Schema('Stack')):
"""Refresh the backtrace for the thread."""
t = find_thread_by_stack_obj(node)
t.process.SetSelectedThread(t)
with commands.open_tracked_tx('Refresh Stack'):
util.get_debugger().HandleCommand('ghidra_trace_put_frames');
@REGISTRY.method(action='refresh')
def refresh_registers(node: sch.Schema('RegisterValueContainer')):
"""Refresh the register values for the frame."""
f = find_frame_by_regs_obj(node)
f.thread.SetSelectedFrame(f.GetFrameID())
# TODO: Groups?
with commands.open_tracked_tx('Refresh Registers'):
util.get_debugger().HandleCommand('ghidra_trace_putreg');
@REGISTRY.method(action='refresh')
def refresh_mappings(node: sch.Schema('Memory')):
"""Refresh the list of memory regions for the process."""
with commands.open_tracked_tx('Refresh Memory Regions'):
util.get_debugger().HandleCommand('ghidra_trace_put_regions');
@REGISTRY.method(action='refresh')
def refresh_modules(node: sch.Schema('ModuleContainer')):
"""
Refresh the modules and sections list for the process.
This will refresh the sections for all modules, not just the selected one.
"""
with commands.open_tracked_tx('Refresh Modules'):
util.get_debugger().HandleCommand('ghidra_trace_put_modules');
@REGISTRY.method(action='activate')
def activate_process(process: sch.Schema('Process')):
"""Switch to the process."""
return
@REGISTRY.method(action='activate')
def activate_thread(thread: sch.Schema('Thread')):
"""Switch to the thread."""
t = find_thread_by_obj(thread)
t.process.SetSelectedThread(t)
@REGISTRY.method(action='activate')
def activate_frame(frame: sch.Schema('StackFrame')):
"""Select the frame."""
f = find_frame_by_obj(frame)
f.thread.SetSelectedFrame(f.GetFrameID())
@REGISTRY.method(action='delete')
def remove_process(process: sch.Schema('Process')):
"""Remove the process."""
proc = find_proc_by_obj(process)
util.get_debugger().HandleCommand(f'target delete 0')
@REGISTRY.method(action='connect')
def target(process: sch.Schema('Process'), spec: str):
"""Connect to a target machine or process."""
util.get_debugger().HandleCommand(f'target select {spec}')
@REGISTRY.method(action='attach')
def attach_obj(process: sch.Schema('Process'), target: sch.Schema('Attachable')):
"""Attach the process to the given target."""
pid = find_availpid_by_obj(target)
util.get_debugger().HandleCommand(f'process attach -p {pid}')
@REGISTRY.method(action='attach')
def attach_pid(process: sch.Schema('Process'), pid: int):
"""Attach the process to the given target."""
util.get_debugger().HandleCommand(f'process attach -p {pid}')
@REGISTRY.method(action='attach')
def attach_name(process: sch.Schema('Process'), name: str):
"""Attach the process to the given target."""
util.get_debugger().HandleCommand(f'process attach -n {name}')
@REGISTRY.method
def detach(process: sch.Schema('Process')):
"""Detach the process's target."""
util.get_debugger().HandleCommand(f'process detach')
@REGISTRY.method(action='launch')
def launch_loader(process: sch.Schema('Process'),
file: ParamDesc(str, display='File'),
args: ParamDesc(str, display='Arguments')=''):
"""
Start a native process with the given command line, stopping at 'main'.
If 'main' is not defined in the file, this behaves like 'run'.
"""
util.get_debugger().HandleCommand(f'file {file}')
    if args != '':
util.get_debugger().HandleCommand(f'settings set target.run-args {args}')
util.get_debugger().HandleCommand(f'process launch --stop-at-entry')
@REGISTRY.method(action='launch')
def launch(process: sch.Schema('Process'),
file: ParamDesc(str, display='File'),
args: ParamDesc(str, display='Arguments')=''):
"""
Run a native process with the given command line.
The process will not stop until it hits one of your breakpoints, or it is
signaled.
"""
util.get_debugger().HandleCommand(f'file {file}')
    if args != '':
util.get_debugger().HandleCommand(f'settings set target.run-args {args}')
util.get_debugger().HandleCommand(f'run')
@REGISTRY.method
def kill(process: sch.Schema('Process')):
"""Kill execution of the process."""
util.get_debugger().HandleCommand('process kill')
@REGISTRY.method(name='continue', action='resume')
def _continue(process: sch.Schema('Process')):
"""Continue execution of the process."""
util.get_debugger().HandleCommand('process continue')
@REGISTRY.method
def interrupt():
"""Interrupt the execution of the debugged program."""
util.get_debugger().HandleCommand('process interrupt')
#util.get_process().SendAsyncInterrupt()
#util.get_debugger().HandleCommand('^c')
#util.get_process().Signal(2)
@REGISTRY.method(action='step_into')
def step_into(thread: sch.Schema('Thread'), n: ParamDesc(int, display='N')=1):
"""Step on instruction exactly."""
t = find_thread_by_obj(thread)
t.process.SetSelectedThread(t)
util.get_debugger().HandleCommand('thread step-inst')
@REGISTRY.method(action='step_over')
def step_over(thread: sch.Schema('Thread'), n: ParamDesc(int, display='N')=1):
"""Step one instruction, but proceed through subroutine calls."""
t = find_thread_by_obj(thread)
t.process.SetSelectedThread(t)
util.get_debugger().HandleCommand('thread step-inst-over')
@REGISTRY.method(action='step_out')
def step_out(thread: sch.Schema('Thread')):
"""Execute until the current stack frame returns."""
if thread is not None:
t = find_thread_by_obj(thread)
t.process.SetSelectedThread(t)
util.get_debugger().HandleCommand('thread step-out')
@REGISTRY.method(action='step_ext')
def step_ext(thread: sch.Schema('Thread'), address: Address):
"""Continue execution up to the given address."""
t = find_thread_by_obj(thread)
t.process.SetSelectedThread(t)
offset = thread.trace.memory_mapper.map_back(t.process, address)
util.get_debugger().HandleCommand(f'thread until -a {offset}')
@REGISTRY.method(name='return', action='step_ext')
def _return(thread: sch.Schema('Thread'), value: int=None):
"""Skip the remainder of the current function."""
t = find_thread_by_obj(thread)
t.process.SetSelectedThread(t)
if value is None:
util.get_debugger().HandleCommand('thread return')
else:
util.get_debugger().HandleCommand(f'thread return {value}')
@REGISTRY.method(action='break_sw_execute')
def break_address(process: sch.Schema('Process'), address: Address):
"""Set a breakpoint."""
proc = find_proc_by_obj(process)
offset = process.trace.memory_mapper.map_back(proc, address)
util.get_debugger().HandleCommand(f'breakpoint set -a 0x{offset:x}')
@REGISTRY.method(action='break_sw_execute')
def break_expression(expression: str):
"""Set a breakpoint."""
# TODO: Escape?
util.get_debugger().HandleCommand(f'breakpoint set -r {expression}')
@REGISTRY.method(action='break_hw_execute')
def break_hw_address(process: sch.Schema('Process'), address: Address):
"""Set a hardware-assisted breakpoint."""
proc = find_proc_by_obj(process)
offset = process.trace.memory_mapper.map_back(proc, address)
util.get_debugger().HandleCommand(f'breakpoint set -H -a 0x{offset:x}')
@REGISTRY.method(action='break_hw_execute')
def break_hw_expression(expression: str):
"""Set a hardware-assisted breakpoint."""
# TODO: Escape?
    util.get_debugger().HandleCommand(f'breakpoint set -H -n {expression}')
@REGISTRY.method(action='break_read')
def break_read_range(process: sch.Schema('Process'), range: AddressRange):
"""Set a read watchpoint."""
proc = find_proc_by_obj(process)
offset_start = process.trace.memory_mapper.map_back(
proc, Address(range.space, range.min))
sz = range.length()
util.get_debugger().HandleCommand(f'watchpoint set expression -s {sz} -w read -- {offset_start}')
@REGISTRY.method(action='break_read')
def break_read_expression(expression: str):
"""Set a read watchpoint."""
util.get_debugger().HandleCommand(f'watchpoint set expression -w read -- {expression}')
@REGISTRY.method(action='break_write')
def break_write_range(process: sch.Schema('Process'), range: AddressRange):
"""Set a watchpoint."""
proc = find_proc_by_obj(process)
offset_start = process.trace.memory_mapper.map_back(
proc, Address(range.space, range.min))
sz = range.length()
util.get_debugger().HandleCommand(f'watchpoint set expression -s {sz} -- {offset_start}')
@REGISTRY.method(action='break_write')
def break_write_expression(expression: str):
"""Set a watchpoint."""
util.get_debugger().HandleCommand(f'watchpoint set expression -- {expression}')
@REGISTRY.method(action='break_access')
def break_access_range(process: sch.Schema('Process'), range: AddressRange):
"""Set an access watchpoint."""
proc = find_proc_by_obj(process)
offset_start = process.trace.memory_mapper.map_back(
proc, Address(range.space, range.min))
sz = range.length()
util.get_debugger().HandleCommand(f'watchpoint set expression -s {sz} -w read_write -- {offset_start}')
@REGISTRY.method(action='break_access')
def break_access_expression(expression: str):
"""Set an access watchpoint."""
util.get_debugger().HandleCommand(f'watchpoint set expression -w read_write -- {expression}')
@REGISTRY.method(action='break_ext')
def break_exception(lang: str):
"""Set a catchpoint."""
util.get_debugger().HandleCommand(f'breakpoint set -E {lang}')
@REGISTRY.method(action='toggle')
def toggle_watchpoint(breakpoint: sch.Schema('WatchpointSpec'), enabled: bool):
"""Toggle a watchpoint."""
    wpt = find_wpt_by_obj(breakpoint)
wpt.enabled = enabled
@REGISTRY.method(action='toggle')
def toggle_breakpoint(breakpoint: sch.Schema('BreakpointSpec'), enabled: bool):
"""Toggle a breakpoint."""
bpt = find_bpt_by_obj(breakpoint)
bpt.enabled = enabled
@REGISTRY.method(action='toggle')
def toggle_breakpoint_location(location: sch.Schema('BreakpointLocation'), enabled: bool):
"""Toggle a breakpoint location."""
bptnum, locnum = find_bptlocnum_by_obj(location)
cmd = 'enable' if enabled else 'disable'
util.get_debugger().HandleCommand(f'breakpoint {cmd} {bptnum}.{locnum}')
@REGISTRY.method(action='delete')
def delete_watchpoint(watchpoint: sch.Schema('WatchpointSpec')):
"""Delete a watchpoint."""
wpt = find_wpt_by_obj(watchpoint)
wptnum = wpt.GetID()
util.get_debugger().HandleCommand(f'watchpoint delete {wptnum}')
@REGISTRY.method(action='delete')
def delete_breakpoint(breakpoint: sch.Schema('BreakpointSpec')):
"""Delete a breakpoint."""
bpt = find_bpt_by_obj(breakpoint)
bptnum = bpt.GetID()
util.get_debugger().HandleCommand(f'breakpoint delete {bptnum}')
@REGISTRY.method
def read_mem(process: sch.Schema('Process'), range: AddressRange):
"""Read memory."""
proc = find_proc_by_obj(process)
offset_start = process.trace.memory_mapper.map_back(
proc, Address(range.space, range.min))
with commands.open_tracked_tx('Read Memory'):
util.get_debugger().HandleCommand(f'ghidra_trace_putmem 0x{offset_start:x} {range.length()}')
@REGISTRY.method
def write_mem(process: sch.Schema('Process'), address: Address, data: bytes):
"""Write memory."""
proc = find_proc_by_obj(process)
offset = process.trace.memory_mapper.map_back(proc, address)
    error = lldb.SBError()
    proc.WriteMemory(offset, data, error)
@REGISTRY.method
def write_reg(frame: sch.Schema('StackFrame'), name: str, value: bytes):
    """Write a register."""
    f = find_frame_by_obj(frame)
    f.thread.SetSelectedFrame(f.GetFrameID())
    proc = util.get_process()
    mname, mval = frame.trace.register_mapper.map_value_back(proc, name, value)
    # Raises KeyError if the register does not exist in this frame
    reg = find_reg_by_name(f, mname)
    size = int(util.parse_and_eval(f'sizeof(${mname})'))
    arr = '{' + ','.join(str(b) for b in mval) + '}'
    util.get_debugger().HandleCommand(f'expr ((unsigned char[{size}])${mname}) = {arr};')

View File

@ -0,0 +1,46 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import lldb
# TODO: I don't know how to register a custom parameter prefix. I would rather
# these were 'ghidra language' and 'ghidra compiler'
class GhidraLanguageParameter(lldb.Parameter):
"""
The language id for Ghidra traces. Set this to 'auto' to try to derive it
from 'show arch' and 'show endian'. Otherwise, set it to a Ghidra
LanguageID.
"""
def __init__(self):
super().__init__('ghidra-language', lldb.COMMAND_DATA, lldb.PARAM_STRING)
self.value = 'auto'
GhidraLanguageParameter()
class GhidraCompilerParameter(lldb.Parameter):
"""
The compiler spec id for Ghidra traces. Set this to 'auto' to try to derive
it from 'show osabi'. Otherwise, set it to a Ghidra CompilerSpecID. Note
that valid compiler spec ids depend on the language id.
"""
def __init__(self):
super().__init__('ghidra-compiler', lldb.COMMAND_DATA, lldb.PARAM_STRING)
self.value = 'auto'
GhidraCompilerParameter()

View File

@ -0,0 +1,465 @@
<context>
<schema name="Session" elementResync="NEVER" attributeResync="NEVER">
<interface name="Access" />
<interface name="Attacher" />
<interface name="Interpreter" />
<interface name="Interruptible" />
<interface name="Launcher" />
<interface name="ActiveScope" />
<interface name="EventScope" />
<interface name="FocusScope" />
<interface name="Aggregate" />
<element schema="VOID" />
<attribute name="Processes" schema="ProcessContainer" required="yes" fixed="yes" />
<attribute name="Available" schema="AvailableContainer" required="yes" fixed="yes" />
<attribute name="Breakpoints" schema="BreakpointContainer" required="yes" fixed="yes" />
<attribute name="Watchpoints" schema="WatchpointContainer" required="yes" fixed="yes" />
<attribute name="_accessible" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_supported_attach_kinds" schema="SET_ATTACH_KIND" required="yes" hidden="yes" />
<attribute name="_prompt" schema="STRING" required="yes" hidden="yes" />
<attribute name="_parameters" schema="MAP_PARAMETERS" required="yes" hidden="yes" />
<attribute name="_event_thread" schema="OBJECT" hidden="yes" />
<attribute name="_focus" schema="Selectable" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Selectable" elementResync="NEVER" attributeResync="NEVER">
<element schema="OBJECT" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="BreakpointContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointSpecContainer" />
<element schema="BreakpointSpec" />
<attribute name="_supported_breakpoint_kinds" schema="SET_BREAKPOINT_KIND" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="WatchpointContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="WatchpointSpecContainer" />
<element schema="WatchpointSpec" />
<attribute name="_supported_breakpoint_kinds" schema="SET_BREAKPOINT_KIND" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="AvailableContainer" canonical="yes" elementResync="ALWAYS" attributeResync="NEVER">
<interface name="Configurable" />
<element schema="Attachable" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_base" schema="INT" />
<attribute schema="VOID" />
</schema>
<schema name="ProcessContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="Configurable" />
<element schema="Process" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_base" schema="INT" />
<attribute schema="VOID" />
</schema>
<schema name="BreakpointSpec" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointSpec" />
<interface name="Deletable" />
<interface name="Togglable" />
<element schema="BreakpointLocation" />
<attribute name="_container" schema="BreakpointContainer" required="yes" hidden="yes" />
<attribute name="_expression" schema="STRING" required="yes" hidden="yes" />
<attribute name="_kinds" schema="SET_BREAKPOINT_KIND" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_enabled" schema="BOOL" required="yes" hidden="yes" />
<attribute name="Commands" schema="STRING" />
<attribute name="Condition" schema="STRING" />
<attribute name="Hit Count" schema="INT" />
<attribute name="Ignore Count" schema="INT" />
<attribute name="Pending" schema="BOOL" />
<attribute name="Silent" schema="BOOL" />
<attribute name="Temporary" schema="BOOL" />
<attribute schema="VOID" />
</schema>
<schema name="WatchpointSpec" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointSpec" />
<interface name="Deletable" />
<interface name="Togglable" />
<attribute name="_container" schema="WatchpointContainer" required="yes" hidden="yes" />
<attribute name="_expression" schema="STRING" required="yes" hidden="yes" />
<attribute name="_kinds" schema="SET_BREAKPOINT_KIND" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_enabled" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_range" schema="RANGE" hidden="yes" />
<attribute name="Condition" schema="STRING" />
<attribute name="Hit Count" schema="INT" />
<attribute name="Ignore Count" schema="INT" />
<attribute schema="VOID" />
</schema>
<schema name="Attachable" elementResync="NEVER" attributeResync="NEVER">
<interface name="Attachable" />
<element schema="VOID" />
<attribute name="_pid" schema="LONG" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Process" elementResync="NEVER" attributeResync="NEVER">
<interface name="Process" />
<interface name="Aggregate" />
<interface name="ExecutionStateful" />
<interface name="Attacher" />
<interface name="Deletable" />
<interface name="Detachable" />
<interface name="Killable" />
<interface name="Launcher" />
<interface name="Resumable" />
<interface name="Steppable" />
<interface name="Interruptible" />
<element schema="VOID" />
<attribute name="Threads" schema="ThreadContainer" required="yes" fixed="yes" />
<attribute name="Breakpoints" schema="BreakpointLocationContainer" required="yes" fixed="yes" />
<attribute name="Watchpoints" schema="WatchpointContainer" required="yes" fixed="yes" />
<attribute name="_exit_code" schema="LONG" />
<attribute name="Environment" schema="Environment" required="yes" fixed="yes" />
<attribute name="Memory" schema="Memory" required="yes" fixed="yes" />
<attribute name="Modules" schema="ModuleContainer" required="yes" fixed="yes" />
<attribute name="_pid" schema="LONG" hidden="yes" />
<attribute name="_state" schema="EXECUTION_STATE" required="yes" hidden="yes" />
<attribute name="_supported_attach_kinds" schema="SET_ATTACH_KIND" required="yes" hidden="yes" />
<attribute name="_parameters" schema="MAP_PARAMETERS" required="yes" hidden="yes" />
<attribute name="_supported_step_kinds" schema="SET_STEP_KIND" required="yes" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Environment" elementResync="NEVER" attributeResync="NEVER">
<interface name="Environment" />
<element schema="VOID" />
<attribute name="arch" schema="STRING" />
<attribute name="os" schema="STRING" />
<attribute name="endian" schema="STRING" />
<attribute name="_arch" schema="STRING" hidden="yes" />
<attribute name="_debugger" schema="STRING" hidden="yes" />
<attribute name="_os" schema="STRING" hidden="yes" />
<attribute name="_endian" schema="STRING" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="ModuleContainer" canonical="yes" elementResync="ONCE" attributeResync="NEVER">
<interface name="ModuleContainer" />
<element schema="Module" />
<attribute name="_supports_synthetic_modules" schema="BOOL" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Memory" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="Memory" />
<element schema="MemoryRegion" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="BreakpointLocation" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointLocation" />
<element schema="VOID" />
<attribute name="_range" schema="RANGE" hidden="yes" />
<attribute name="_spec" schema="BreakpointSpec" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="BreakpointLocationContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="BreakpointLocationContainer" />
<element schema="BreakpointLocation" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="ThreadContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="Configurable" />
<element schema="Thread" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_base" schema="INT" />
<attribute schema="VOID" />
</schema>
<schema name="Method" elementResync="NEVER" attributeResync="NEVER">
<interface name="Method" />
<element schema="VOID" />
<attribute name="_display" schema="STRING" required="yes" fixed="yes" hidden="yes" />
<attribute name="_return_type" schema="TYPE" required="yes" fixed="yes" hidden="yes" />
<attribute name="_parameters" schema="MAP_PARAMETERS" required="yes" fixed="yes" hidden="yes" />
<attribute schema="VOID" fixed="yes" hidden="yes" />
</schema>
<schema name="Thread" elementResync="NEVER" attributeResync="NEVER">
<interface name="Thread" />
<interface name="ExecutionStateful" />
<interface name="Steppable" />
<interface name="Aggregate" />
<element schema="VOID" />
<attribute name="Stack" schema="Stack" required="yes" fixed="yes" />
<attribute name="_tid" schema="LONG" hidden="yes" />
<attribute name="_state" schema="EXECUTION_STATE" required="yes" hidden="yes" />
<attribute name="_supported_step_kinds" schema="SET_STEP_KIND" required="yes" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="Advance" schema="Method" required="yes" fixed="yes" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Module" elementResync="NEVER" attributeResync="NEVER">
<interface name="Module" />
<element schema="VOID" />
<attribute name="Sections" schema="SectionContainer" required="yes" fixed="yes" />
<attribute name="Symbols" schema="SymbolContainer" required="yes" fixed="yes" />
<attribute name="range" schema="RANGE" />
<attribute name="module name" schema="STRING" />
<attribute name="_module_name" schema="STRING" required="yes" hidden="yes" />
<attribute name="_range" schema="RANGE" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="MemoryRegion" elementResync="NEVER" attributeResync="NEVER">
<interface name="MemoryRegion" />
<element schema="VOID" />
<attribute name="_offset" schema="LONG" required="yes" fixed="yes" hidden="yes" />
<attribute name="_objfile" schema="STRING" required="yes" fixed="yes" hidden="yes" />
<attribute name="_readable" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_writable" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_executable" schema="BOOL" required="yes" hidden="yes" />
<attribute name="_range" schema="RANGE" required="yes" hidden="yes" />
<attribute name="_memory" schema="Memory" required="yes" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="SectionContainer" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="SectionContainer" />
<element schema="Section" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Stack" canonical="yes" elementResync="NEVER" attributeResync="NEVER">
<interface name="Stack" />
<element schema="StackFrame" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="SymbolContainer" canonical="yes" elementResync="ONCE" attributeResync="NEVER">
<interface name="SymbolNamespace" />
<element schema="Symbol" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Symbol" elementResync="NEVER" attributeResync="NEVER">
<interface name="Symbol" />
<element schema="VOID" />
<attribute name="_size" schema="LONG" fixed="yes" hidden="yes" />
<attribute name="_namespace" schema="SymbolContainer" required="yes" fixed="yes" hidden="yes" />
<attribute name="_data_type" schema="DATA_TYPE" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ADDRESS" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute name="_bpt" schema="STRING" />
<attribute schema="VOID" />
</schema>
<schema name="StackFrame" elementResync="NEVER" attributeResync="NEVER">
<interface name="StackFrame" />
<interface name="Aggregate" />
<element schema="VOID" />
<attribute name="_function" schema="STRING" hidden="yes" />
<attribute name="Registers" schema="RegisterValueContainer" required="yes" fixed="yes" />
<attribute name="_pc" schema="ADDRESS" required="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="Section" elementResync="NEVER" attributeResync="NEVER">
<interface name="Section" />
<element schema="VOID" />
<attribute name="range" schema="RANGE" />
<attribute name="_module" schema="Module" required="yes" fixed="yes" hidden="yes" />
<attribute name="_range" schema="RANGE" required="yes" fixed="yes" />
<attribute name="_offset" schema="INT" required="no" fixed="yes" />
<attribute name="_objfile" schema="STRING" required="no" fixed="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="RegisterValueContainer" canonical="yes" elementResync="ONCE" attributeResync="ONCE">
<interface name="RegisterContainer" />
<interface name="RegisterBank" />
<element schema="RegisterValue" />
<attribute name="General Purpose Registers" schema="RegisterBank" />
<attribute name="Floating Point Registers" schema="RegisterBank" />
<attribute name="Advanced Vector Extensions" schema="RegisterBank" />
<attribute name="Memory Protection Extensions" schema="RegisterBank" />
<attribute name="_descriptions" schema="RegisterValueContainer" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="RegisterBank" canonical="yes" elementResync="ONCE" attributeResync="NEVER">
<interface name="RegisterBank" />
<element schema="RegisterValue" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
<schema name="RegisterValue" elementResync="NEVER" attributeResync="NEVER">
<interface name="Register" />
<element schema="VOID" />
<attribute name="_container" schema="OBJECT" required="yes" fixed="yes" hidden="yes" />
<attribute name="_length" schema="INT" required="yes" fixed="yes" hidden="yes" />
<attribute name="_value" schema="ANY" hidden="yes" />
<attribute name="_type" schema="STRING" hidden="yes" />
<attribute name="_display" schema="STRING" hidden="yes" />
<attribute name="_short_display" schema="STRING" hidden="yes" />
<attribute name="_kind" schema="STRING" fixed="yes" hidden="yes" />
<attribute name="_order" schema="INT" hidden="yes" />
<attribute name="_modified" schema="BOOL" hidden="yes" />
<attribute schema="VOID" />
</schema>
</context>

View File

@ -0,0 +1,236 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from collections import namedtuple
import os
import re
import sys
import lldb
LldbVersion = namedtuple('LldbVersion', ['full', 'major', 'minor'])
def _compute_lldb_ver():
blurb = lldb.debugger.GetVersionString()
top = blurb.split('\n')[0]
full = top.split(' ')[2]
major, minor = full.split('.')[:2]
return LldbVersion(full, int(major), int(minor))
LLDB_VERSION = _compute_lldb_ver()
GNU_DEBUGDATA_PREFIX = ".gnu_debugdata for "
class Module(namedtuple('BaseModule', ['name', 'base', 'max', 'sections'])):
pass
class Section(namedtuple('BaseSection', ['name', 'start', 'end', 'offset', 'attrs'])):
def better(self, other):
start = self.start if self.start != 0 else other.start
end = self.end if self.end != 0 else other.end
offset = self.offset if self.offset != 0 else other.offset
attrs = dict.fromkeys(self.attrs)
attrs.update(dict.fromkeys(other.attrs))
return Section(self.name, start, end, offset, list(attrs))
# AFAICT, Objfile does not give info about load addresses :(
class ModuleInfoReader(object):
def name_from_line(self, line):
mat = self.objfile_pattern.fullmatch(line)
if mat is None:
return None
n = mat['name']
if n.startswith(GNU_DEBUGDATA_PREFIX):
return None
return None if mat is None else mat['name']
def section_from_sbsection(self, s):
start = s.GetLoadAddress(get_target())
if start >= sys.maxsize*2:
start = 0
end = start + s.GetFileByteSize()
offset = s.GetFileOffset()
name = s.GetName()
attrs = s.GetPermissions()
return Section(name, start, end, offset, attrs)
def finish_module(self, name, sections):
alloc = {k: s for k, s in sections.items()}
if len(alloc) == 0:
return Module(name, 0, 0, alloc)
# TODO: This may not be the module base, depending on headers
all_zero = True
for s in alloc.values():
if s.start != 0:
all_zero = False
if all_zero:
base_addr = 0
else:
base_addr = min(s.start for s in alloc.values() if s.start != 0)
max_addr = max(s.end for s in alloc.values())
return Module(name, base_addr, max_addr, alloc)
def get_modules(self):
modules = {}
name = None
sections = {}
for i in range(0, get_target().GetNumModules()):
module = get_target().GetModuleAtIndex(i)
fspec = module.GetFileSpec()
name = debracket(fspec.GetFilename())
sections = {}
for i in range(0, module.GetNumSections()):
s = self.section_from_sbsection(module.GetSectionAtIndex(i))
sname = debracket(s.name)
sections[sname] = s
modules[name] = self.finish_module(name, sections)
return modules
def _choose_module_info_reader():
return ModuleInfoReader()
MODULE_INFO_READER = _choose_module_info_reader()
class Region(namedtuple('BaseRegion', ['start', 'end', 'offset', 'perms', 'objfile'])):
pass
class RegionInfoReader(object):
def region_from_sbmemreg(self, info):
start = info.GetRegionBase()
end = info.GetRegionEnd()
offset = info.GetRegionBase()
if offset >= sys.maxsize:
offset = 0
perms = ""
if info.IsReadable():
perms += 'r'
if info.IsWritable():
perms += 'w'
if info.IsExecutable():
perms += 'x'
objfile = info.GetName()
return Region(start, end, offset, perms, objfile)
def get_regions(self):
regions = []
reglist = get_process().GetMemoryRegions()
for i in range(0, reglist.GetSize()):
            info = lldb.SBMemoryRegionInfo()
            success = reglist.GetMemoryRegionAtIndex(i, info)
if success:
r = self.region_from_sbmemreg(info)
regions.append(r)
return regions
def full_mem(self):
# TODO: This may not work for Harvard architectures
sizeptr = int(parse_and_eval('sizeof(void*)')) * 8
return Region(0, 1 << sizeptr, 0, None, 'full memory')
def _choose_region_info_reader():
return RegionInfoReader()
REGION_INFO_READER = _choose_region_info_reader()
BREAK_LOCS_CMD = 'breakpoint list {}'
BREAK_PATTERN = re.compile('')
BREAK_LOC_PATTERN = re.compile('')
class BreakpointLocation(namedtuple('BaseBreakpointLocation', ['address', 'enabled', 'thread_groups'])):
pass
class BreakpointLocationInfoReader(object):
def get_locations(self, breakpoint):
return breakpoint.locations
def _choose_breakpoint_location_info_reader():
return BreakpointLocationInfoReader()
BREAKPOINT_LOCATION_INFO_READER = _choose_breakpoint_location_info_reader()
def get_debugger():
return lldb.SBDebugger.FindDebuggerWithID(1)
def get_target():
return get_debugger().GetTargetAtIndex(0)
def get_process():
return get_target().GetProcess()
def selected_thread():
return get_process().GetSelectedThread()
def selected_frame():
return selected_thread().GetSelectedFrame()
def parse_and_eval(expr, signed=False):
if signed is True:
return get_target().EvaluateExpression(expr).GetValueAsSigned()
return get_target().EvaluateExpression(expr).GetValueAsUnsigned()
def get_eval(expr):
return get_target().EvaluateExpression(expr)
def get_description(object, level=None):
stream = lldb.SBStream()
if level is None:
object.GetDescription(stream)
else:
object.GetDescription(stream, level)
return escape_ansi(stream.GetData())
conv_map = {}
def get_convenience_variable(id):
#val = get_target().GetEnvironment().Get(id)
if id not in conv_map:
return "auto"
val = conv_map[id]
if val is None:
return "auto"
return val
def set_convenience_variable(id, value):
#env = get_target().GetEnvironment()
#return env.Set(id, value, True)
conv_map[id] = value
def escape_ansi(line):
    ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
return ansi_escape.sub('', line)
def debracket(init):
val = init
val = val.replace("[","(")
val = val.replace("]",")")
return val

View File

@ -0,0 +1,25 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "ghidralldb"
version = "10.4"
authors = [
{ name="Ghidra Development Team" },
]
description = "Ghidra's Plugin for lldb"
readme = "README.md"
requires-python = ">=3.7"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
dependencies = [
"ghidratrace==10.4",
]
[project.urls]
"Homepage" = "https://github.com/NationalSecurityAgency/ghidra"
"Bug Tracker" = "https://github.com/NationalSecurityAgency/ghidra/issues"

View File

@ -31,54 +31,54 @@ import ghidra.dbg.testutil.DummyProc;
public enum MacOSSpecimen implements DebuggerTestSpecimen, DebuggerModelTestUtils {
SPIN {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expSpin");
}
},
FORK_EXIT {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expFork");
}
},
CLONE_EXIT {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expCloneExit");
}
},
PRINT {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expPrint");
}
},
REGISTERS {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expRegisters");
}
},
STACK {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expStack");
}
},
CREATE_PROCESS {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expCreateProcess");
}
},
CREATE_THREAD_EXIT {
@Override
String getCommandLine() {
public String getCommandLine() {
return DummyProc.which("expCreateThreadExit");
}
};
abstract String getCommandLine();
public abstract String getCommandLine();
@Override
public DummyProc runDummy() throws Throwable {
@ -117,24 +117,19 @@ public enum MacOSSpecimen implements DebuggerTestSpecimen, DebuggerModelTestUtil
}
@Override
public boolean isRunningIn(TargetProcess process, AbstractDebuggerModelTest test)
throws Throwable {
public boolean isRunningIn(TargetProcess process, AbstractDebuggerModelTest test) throws Throwable {
// NB. ShellUtils.parseArgs removes the \s. Not good.
String expected = getBinModuleName();
TargetObject session = process.getParent().getParent();
Collection<TargetModule> modules =
test.m.findAll(TargetModule.class, session.getPath(), true).values();
return modules.stream()
.anyMatch(m -> expected.equalsIgnoreCase(getShortName(m.getModuleName())));
Collection<TargetModule> modules = test.m.findAll(TargetModule.class, session.getPath(), true).values();
return modules.stream().anyMatch(m -> expected.equalsIgnoreCase(getShortName(m.getModuleName())));
}
@Override
public boolean isAttachable(DummyProc dummy, TargetAttachable attachable,
AbstractDebuggerModelTest test) throws Throwable {
public boolean isAttachable(DummyProc dummy, TargetAttachable attachable, AbstractDebuggerModelTest test)
throws Throwable {
waitOn(attachable.fetchAttributes());
long pid =
attachable.getTypedAttributeNowByName(LldbModelTargetAvailable.PID_ATTRIBUTE_NAME,
Long.class, -1L);
long pid = attachable.getTypedAttributeNowByName(LldbModelTargetAvailable.PID_ATTRIBUTE_NAME, Long.class, -1L);
return pid == dummy.pid;
}
}

View File

@ -13,97 +13,21 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*plugins {
id 'com.google.protobuf' version '0.8.10'
}*/
apply from: "${rootProject.projectDir}/gradle/javaProject.gradle"
apply from: "${rootProject.projectDir}/gradle/jacocoProject.gradle"
apply from: "${rootProject.projectDir}/gradle/javaTestProject.gradle"
apply from: "${rootProject.projectDir}/gradle/distributableGhidraModule.gradle"
apply from: "${rootProject.projectDir}/gradle/debugger/hasProtobuf.gradle"
apply plugin: 'eclipse'
eclipse.project.name = 'Debug Debugger-gadp'
configurations {
allProtocArtifacts
protocArtifact
}
def platform = getCurrentPlatformName()
dependencies {
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:windows-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:linux-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:linux-aarch_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:osx-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:osx-aarch_64@exe'
if (isCurrentWindows()) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:windows-x86_64@exe'
}
if (isCurrentLinux()) {
if (platform.endsWith("x86_64")) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:linux-x86_64@exe'
}
else {
protocArtifact 'com.google.protobuf:protoc:3.21.8:linux-aarch_64@exe'
}
}
if (isCurrentMac()) {
if (platform.endsWith("x86_64")) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:osx-x86_64@exe'
}
else {
protocArtifact 'com.google.protobuf:protoc:3.21.8:osx-aarch_64@exe'
}
}
api project(':Framework-AsyncComm')
api project(':Framework-Debugging')
api project(':ProposedUtils')
testImplementation project(path: ':Framework-AsyncComm', configuration: 'testArtifacts')
testImplementation project(path: ':Framework-Debugging', configuration: 'testArtifacts')
}
/*protobuf {
protoc {
artifact = 'com.google.protobuf:protoc:3.21.8'
}
}*/
task generateProto {
ext.srcdir = file("src/main/proto")
ext.src = fileTree(srcdir) {
include "**/*.proto"
}
ext.outdir = file("build/generated/source/proto/main/java")
outputs.dir(outdir)
inputs.files(src)
dependsOn(configurations.protocArtifact)
doLast {
def exe = configurations.protocArtifact.first()
if (!isCurrentWindows()) {
exe.setExecutable(true)
}
exec {
commandLine exe, "--java_out=$outdir", "-I$srcdir"
args src
}
}
}
tasks.compileJava.dependsOn(tasks.generateProto)
tasks.eclipse.dependsOn(tasks.generateProto)
rootProject.tasks.prepDev.dependsOn(tasks.generateProto)
sourceSets {
main {
java {
srcDir tasks.generateProto.outdir
}
}
}
zipSourceSubproject.dependsOn generateProto

View File

@ -18,91 +18,24 @@ apply from: "${rootProject.projectDir}/gradle/javaProject.gradle"
apply from: "${rootProject.projectDir}/gradle/jacocoProject.gradle"
apply from: "${rootProject.projectDir}/gradle/javaTestProject.gradle"
apply from: "${rootProject.projectDir}/gradle/distributableGhidraModule.gradle"
apply from: "${rootProject.projectDir}/gradle/debugger/hasProtobuf.gradle"
apply plugin: 'eclipse'
eclipse.project.name = 'Debug Debugger-isf'
configurations {
allProtocArtifacts
protocArtifact
}
def platform = getCurrentPlatformName()
dependencies {
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:windows-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:linux-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:linux-aarch_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:osx-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:osx-aarch_64@exe'
if (isCurrentWindows()) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:windows-x86_64@exe'
}
if (isCurrentLinux()) {
if (platform.endsWith("x86_64")) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:linux-x86_64@exe'
}
else {
protocArtifact 'com.google.protobuf:protoc:3.21.8:linux-aarch_64@exe'
}
}
if (isCurrentMac()) {
if (platform.endsWith("x86_64")) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:osx-x86_64@exe'
}
else {
protocArtifact 'com.google.protobuf:protoc:3.21.8:osx-aarch_64@exe'
}
}
api project(':Framework-AsyncComm')
api project(':Framework-Debugging')
api project(':ProposedUtils')
testImplementation project(path: ':Framework-AsyncComm', configuration: 'testArtifacts')
testImplementation project(path: ':Framework-Debugging', configuration: 'testArtifacts')
}
task generateProto {
ext.srcdir = file("src/main/proto")
ext.src = fileTree(srcdir) {
include "**/*.proto"
}
ext.outdir = file("build/generated/source/proto/main/java")
outputs.dir(outdir)
inputs.files(src)
dependsOn(configurations.protocArtifact)
doLast {
def exe = configurations.protocArtifact.first()
if (!isCurrentWindows()) {
exe.setExecutable(true)
}
exec {
commandLine exe, "--java_out=$outdir", "-I$srcdir"
args src
}
}
}
tasks.compileJava.dependsOn(tasks.generateProto)
tasks.eclipse.dependsOn(tasks.generateProto)
rootProject.tasks.prepDev.dependsOn(tasks.generateProto)
sourceSets {
main {
java {
srcDir tasks.generateProto.outdir
}
}
}
zipSourceSubproject.dependsOn generateProto
// Include buildable native source in distribution
rootProject.assembleDistribution {
from (this.project.projectDir.toString()) {
from (this.project.projectDir.toString()) {
include "runISFServer"
into { getZipPath(this.project) }
}
}
}

View File

@ -0,0 +1,280 @@
This is just a scratchpad of notes for development.
After developer documentation is authored, this file should be deleted.
Terminology can be a bit weird regarding client vs server.
Instead, I prefer to use "front end" and "back end".
Ghidra is always the front end, as it provides the UI.
The actual debugger is always the "back end", as it provides the actual instrumentation and access to the target.
With respect to TCP, the connection can go either way, but once established, Ghidra still plays the front end role.
Client/Server otherwise depends on context.
For the trace-recording channel, the back-end is the client, and the front-end (Ghidra) is the server.
The back-end invokes remote methods on the DBTrace, and those cause DomainObjectChange events, updating the UI.
The front-end replies with minimal information.
(More on this and sync/async/batching later)
For the command channel, the front-end (Ghidra) is the client, and the back-end is the server.
The user presses a button, which invokes a remote method on the back-end.
Often, that method and/or its effects on the target and back-end result in it updating the trace, and the loop is complete.
Again, the back-end replies with minimal information.
One notable exception is the `execute` method, which can optionally return captured console output.
In general, methods should only respond with actual information that doesn't belong in the trace.
While I've not yet needed this, I suppose another exception could be for methods that want to return the path to an object, to clarify association of cause and effect.
Regarding sync/async and batching:
One of the goals of TraceRmi was to simplify the trace-recording process.
It does this in three ways:
1. Providing direct control to write the Trace database.
The ObjectModel approach was more descriptive.
It would announce the existence of things, and a recorder at the front end would decide (applying some arcane rules) what to record and display.
Almost every new model required some adjustment to the recorder.
2. Changing to a synchronous RMI scheme.
The decision to use an asynchronous scheme was to avoid accidental lock-ups of the Swing thread.
In practice, it just poisoned every API that depended on it, and we still got Swing lock-ups.
And worse, they were harder to diagnose, because the stack traces were obscured.
And still worse, execution order and threading was difficult to predict.
We've only been somewhat successful in changing to a fully synchronous scheme, but even then, we've (attempted to) mitigate each of the above complaints.
On the front-end, the internals still use CompletableFuture, but we're more apt to use .get(), which keeps the stack together on the thread waiting for the result.
In essence, there's little difference in blocking on .get() vs blocking on .recv().
The reason we need a dedicated background thread to receive is to sort out the two channels.
The recommended public API method is RemoteMethod.invoke(), which uses .get() internally, so this is mostly transparent, except when debugging the front end.
There is still an .invokeAsync(), if desired, giving better control of timeouts, which is actually a feature we would not have using a purely synchronous .recv() (at least not without implementing non-blocking IO).
To mitigate Swing lock-ups the .get() methods are overridden to explicitly check for the Swing thread.
On the back end, the internals work similarly to the front end.
We use a Future to handle waiting for the result, and the implementation of each trace modification method will immediately invoke .result().
Unfortunately, this slows things down far too much, since every minuscule operation requires a round trip.
We mitigate this by implementing a `batch` context manager.
Inside this context, most of the trace modification methods will now return the Future.
However, a reference to each such future is stored off in the context.
When the context is exited, all the Futures' results are waited on.
This maintains a mostly synchronous behavior, while alleviating the repeated round-trip costs (a sketch follows this list).
3. Simplifying the back end implementation, and providing it in Python.
It turns out no debugger we've encountered up to this point provides Java language bindings out of the box.
The closest we've seen is LLDB, which has specified their interfaces using SWIG, which lent itself to exporting Java bindings.
And that was lucky, too, because accessing C++ virtual functions from JNA is fraught with peril.
For gdb, we've been using a pseudo-terminal or ssh connection to its Machine Interface, which aside from the piping delays, has been pretty nice.
It's not been great on Windows, though -- their ConPTY stuff has some ANSI oddities, the handling of which has slowed our performance.
For dbgeng/dbgmodel, we've been fortunate that they follow COM+, which is fairly well understood by JNA.
Nevertheless, all of these have required us to hack some kind of native bindings in Java.
This introduces risks of crashing the JVM, and in some cases can cause interesting conflicts, e.g., the JVM and dbgeng may try to handle the same signals differently.
dbgeng also only allows a single session.
If the user connects twice to it using IN-VM (this is easy to do by accident), then the two connections are aliases of the same dbgeng session.
Both gdb and lldb offer Python bindings, so Python is an obvious choice for back-end implementations.
We are already using protobuf, so we keep it, but we developed a new protocol specification.
The trace modification methods are prescribed by Ghidra, so each is implemented specifically in the trace client.
The back end remote methods are described entirely by the back end.
They are enumerated during connection negotiation; otherwise, there is only one generic "Invoke" message.
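To make the batching in point 2 above concrete, here is a minimal sketch of such a context manager; the transport method and Future type are stand-ins, not the actual ghidratrace client API.
```python
# Sketch only: the real ghidratrace client API differs; the transport here is a stand-in.
import threading
from concurrent.futures import Future
from contextlib import contextmanager

class Client(object):
    def __init__(self):
        self._batch = threading.local()

    def _send_request(self, msg):
        """Stand-in for the real transport: returns a Future of the reply."""
        fut = Future()
        fut.set_result(None)  # the real client completes this when the reply arrives
        return fut

    @contextmanager
    def batch(self):
        """Collect futures instead of waiting on each reply immediately."""
        self._batch.futures = []
        try:
            yield
        finally:
            futures, self._batch.futures = self._batch.futures, None
            for fut in futures:
                fut.result()  # one wait at the end surfaces any errors

    def _maybe_wait(self, fut):
        if getattr(self._batch, 'futures', None) is not None:
            self._batch.futures.append(fut)
            return fut        # inside a batch: defer the round trip
        return fut.result()   # outside a batch: stay synchronous

    def put_bytes(self, snap, addr, data):
        return self._maybe_wait(self._send_request(('put_bytes', snap, addr, data)))
```
Inside `with client.batch():`, a long run of trace writes incurs a single wait when the context exits rather than a round trip per call.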
Because we're more tightly integrated with the debugger, there may be some interesting caveats.
Pay careful attention to synchronization and session tear down.
At one point, I was using gdb's post_event as a Python Executor.
A separate thread handled the method invocation requests, scheduled each on the executor, waited for the result, and then responded.
This worked until the front end invoked `execute("quit")`.
I was expecting gdb to just quit, and the front end would expect the connection to die.
However, this never happened.
Instead, during execution of the `quit`, gdb wanted to clean up the Python interpreter.
Part of that was gracefully cleaning up all the Python threads, one of which was blocking indefinitely on execution of the `quit`.
Thus, the two threads were waiting on each other, and gdb locked up.
Depending on the debugger, the Python API may be more or less mature, and there could be much variation among versions we'd like to support.
For retrieving information, we at least have console capture as a fallback; however, there's not always a reliable way to detect certain events without a direct callback.
At worst, we can always hook something like `prompt`, but if we do, we must be quick in our checks.
Dealing with multiple versions, there are at least two ways:
1. Probe for the feature.
This is one case where Python's dynamic nature helps out.
Use `hasattr` to check for the existence of various features and choose accordingly.
2. Check the version string.
Assuming version information can be consistently and reliably retrieved across all the supported versions, parse it first thing.
If the implementation of a feature varies across versions, the appropriate one can be selected.
This may not work well for users who are on development branches, or otherwise off the standard releases of their debuggers.
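A minimal sketch of the probing approach (and of the quick `prompt` fallback mentioned above), using gdb's Python API; which events exist varies by gdb version, so treat the names here as examples.
```python
# Sketch only: probe for a feature with hasattr and fall back to a cheap prompt hook.
import gdb

def install_stop_hook(on_stop):
    """Prefer a direct event callback; otherwise hook the prompt."""
    events = getattr(gdb, 'events', None)
    if events is not None and hasattr(events, 'stop'):
        events.stop.connect(lambda event: on_stop())
        return 'events.stop'
    # Fallback: gdb calls prompt_hook before displaying each prompt.
    # Keep the check quick, since it runs on every prompt.
    old_hook = gdb.prompt_hook

    def hook(current_prompt):
        on_stop()
        return old_hook(current_prompt) if old_hook else None

    gdb.prompt_hook = hook
    return 'prompt_hook'
```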
This is probably well understood by the Python community, but I'll overstate it here:
If you've written something, but you haven't unit tested it yet, then you haven't really written it.
This may be mitigated by some static analysis tools and type annotations, but I didn't use them.
In fact, you might even say I abused type annotations for remote method specifications.
For gdb, I did all of my unit testing using JUnit as the front end in Java.
This is perhaps not ideal, since this is inherently an integration test; nevertheless, it does allow me to test each intended feature of the back end separately.
# Package installation
I don't know what the community preference will be here, but now that we're playing in the Python ecosystem, we have to figure out how to play nicely.
Granted, some of this depends on how nicely the debugger plays in the Python ecosystem.
My current thought is to distribute our stuff as Python packages, and let the user figure it out.
We'll still want to figure out the best way, if possible, to make things work out of the box.
Nevertheless, a `pip install` command may not be *that* offensive for a set-up step.
That said, for unit testing, I've had to incorporate package installation as a @BeforeClass method.
There's probably a better way, and that way may also help with out-of-the-box support.
Something like setting PYTHONPATH before invoking the debugger?
There's still the issue of installing protobuf, though.
And the version we use is not the latest, which may put users who already have protobuf in dependency hell.
We use version 3.20, while the latest is 4.something.
According to protobuf docs, major versions are not guaranteed backward compatible.
To upgrade, we'd also have to upgrade the Java side.
# Protobuf headaches
Protobufs in Java have these nifty `writeDelimitedTo` and `parseDelimitedFrom` methods.
There's no equivalent for Python :(
That said, according to a stackoverflow post (which I've lost track of, but it's easily confirmed by examining protobuf's Java source), you can hand-spin this by prepending a varint giving each message's length.
If only the varint codec were part of protobuf's public Python API....
They're pretty easily accessed in Python by importing the `internal` package, but that's probably not a good idea.
Also (as I had been doing that), it's easy to goof up reading just the variable-length int while keeping the encoded message intact for parsing.
I instead just use a fixed 32-bit int now.
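For reference, a sketch of that fixed-length framing; the 4-byte big-endian prefix is an assumption for illustration, not a statement of the actual Trace RMI wire format.
```python
# Sketch only: check the actual protocol for the real prefix size and byte order.
import struct

def send_delimited(sock, msg):
    """Send a protobuf message prefixed by a fixed 32-bit length."""
    data = msg.SerializeToString()
    sock.sendall(struct.pack('>I', len(data)) + data)

def recv_exactly(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise EOFError('socket closed mid-message')
        buf += chunk
    return buf

def recv_delimited(sock, msg_type):
    """Receive one length-prefixed message and parse it as msg_type."""
    (length,) = struct.unpack('>I', recv_exactly(sock, 4))
    msg = msg_type()
    msg.ParseFromString(recv_exactly(sock, length))
    return msg
```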
# How-To?
For now, I'd say just use the gdb implementation as a template / guide.
Just beware, the whole thing is a bit unstable, so the code may change, but still, I don't expect it to change so drastically that integration work would be scrapped.
If you're writing Python, create a Python package following the template for gdb's.
I'd like the version numbers to match Ghidra's, though this may need discussion.
Currently, only Python 3 is supported.
I expect older versions of gdb may not support Py3, so we may need some backporting.
That said, if your distro's package for whatever debugger is compiled for Py2, you may need to build from source, assuming it supports Py3 at all.
I recommend mirroring the file layout:
__init__.py:
Python package marker, but also initialization.
For gdb, this file gets executed when the user types `python import ghidragdb`.
Thus, that's how they load the extension.
arch.py:
Utilities for mapping architecture-specific things between back and front ends.
Technically, you should just be able to use the "DATA" processor for your trace, but things will generally work better if you can map.
commands.py:
These are commands we add to the debugger's CLI.
For gdb, we use classes that extend `gdb.Command`, which allows the user to access them whether or not connected to Ghidra.
For now, this is the recommendation, as I expect it'll allow users to "hack" on it more easily, either to customize or to retrieve diagnostics, etc.
Notice that I use gdb's expression evaluator wherever that can enhance the command's usability, e.g., `ghidra trace putval` (see the sketch after this list).
hooks.py:
These are event callbacks from the debugger as well as whatever plumbing is necessary to actually install them.
That "plumbing" may vary, since the debugger may not directly support the callback you're hoping for.
In gdb, there are at least 3 flavors:
1. A directly-supported callback, i.e., in `gdb.events`
2. A breakpoint callback, which also breaks down into two sub-flavors:
* Internal breakpoint called back via `gdb.Breakpoint.stop`
* Normal breakpoint whose commands invoke a CLI command
3. A hooked command to invoke a CLI command, e.g., `define hook-inferior`
method.py:
These are remote methods available to the front end.
See the `MethodRegistry` object in the Python implementation, or the `RemoteMethod` interface in the Java implementation.
parameters.py:
These are for gdb parameters, which may not map to anything in your debugger, so adjust as necessary.
They're preferred to custom commands whose only purpose is to access a variable.
schema.xml:
This is exactly what you think it is.
It is recommended you copy this directly from the ObjectModel-based implementation and make adjustments as needed.
See `commands.start_trace` to see how to load this file from your Python package.
util.py:
Just utilities and such.
For the gdb connector, this is where I put my version-specific implementations, e.g., to retrieve the memory map and module list.
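To illustrate the commands.py and hooks.py pieces referenced in the list above, here is a sketch against gdb's Python API; the command name and handler bodies are placeholders, not the actual ghidragdb code.
```python
# Sketch only: names and bodies are placeholders.
import gdb

class ExampleStartTxCommand(gdb.Command):
    """Example CLI command, loosely in the spirit of the 'ghidra trace ...' commands."""

    def __init__(self):
        super().__init__('example-trace-start-tx', gdb.COMMAND_USER)

    def invoke(self, argument, from_tty):
        self.dont_repeat()
        print("would start a transaction: %s" % argument)

ExampleStartTxCommand()  # registration happens by constructing the instance

# Hook flavor 1: a directly-supported callback
def on_stop(event):
    print("target stopped; would update the trace here")

gdb.events.stop.connect(on_stop)

# Hook flavor 2: an internal breakpoint with a stop() callback
class SnapshotBreakpoint(gdb.Breakpoint):
    def stop(self):
        print("hit %s; would take a snapshot here" % self.location)
        return False  # never actually stop the target
```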
For testing, similarly copy the JUnit tests (they're in the IntegrationTests project) into a separate properly named package.
I don't intend to factor out test cases, except for a few utilities.
The only real service that practice provided in the past was to remind you which cases you ought to test.
Prescribing exactly *how* to test those and the scenarios, I think, was a mistake.
If I provide a base test class, it might just be to name some methods that all fail by default.
Then, as a tester, the failures would remind you to override each method with the actual test code.
For manual testing, I've used two methods:
1. See `GdbCommandsTest#testManual`.
Uncomment it to have JUnit start a trace-rmi front-end listener.
You can then manually connect from inside your debugger and send/diagnose commands one at a time.
Typically, I'd use the script from another test that was giving me trouble.
2. Start the full Ghidra Debugger and use a script to connect.
At the moment, there's little UI integration beyond what is already offered by viewing a populated trace.
Use either ConnectTraceRmiScript or ListenTraceRmiScript and follow the prompts / console.
The handler will activate the trace when commanded, and it will follow the latest snapshot.
# User installation instructions:
The intent is to provide .whl or whatever Python packages as part of the Ghidra distribution.
A user should be able to install them using `pip3 install ...`, however:
We've recently encountered issues where the version of Python that gdb is linked to may not be the same version of Python the user gets when they type `python`, `python3`, or `pip3`.
To manually check for this version, a user must type, starting in their shell:
```bash
gdb
python-interactive
import sys
print(sys.version)
```
Suppose they get `3.8.10`.
They'd then take the major and minor numbers to invoke `python3.8` directly:
```bash
python3.8 -m pip install ...
```
A fancy way to just have gdb print the python command for you is:
```bash
gdb --batch -ex 'python import sys' -ex 'python print(f"python{sys.version_info.major}.{sys.version_info.minor}")'
```
Regarding method registry, the executor has to be truly asynchronous.
You cannot just invoke the method synchronously and return a completed future.
If you do, you'll hang the message receiver thread, which may need to be free if the invoked method interacts with the trace.
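A sketch of what "truly asynchronous" means here; the registry's expected executor interface is an assumption.
```python
# Sketch only: return a Future immediately; complete it from another thread.
import threading
from concurrent.futures import Future

def invoke_async(method, kwargs):
    """Run the method on a worker thread so the receiver thread stays free."""
    fut = Future()

    def run():
        try:
            fut.set_result(method(**kwargs))
        except BaseException as e:
            fut.set_exception(e)

    # Daemon thread: take care at teardown (see the quit deadlock above).
    threading.Thread(target=run, daemon=True).start()
    return fut
```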
We've currently adopted a method-naming convention that aims for a somewhat consistent API across back-end plugins.
In general, the method name should match the action name exactly, e.g., the method corresponding to Ghidra's `resume` action should be defined as:
@REGISTRY.method
def resume(...):
...
Not:
@REGISTRY.method(name='continue', action='resume')
def _continue(...):
...
Even though the back-end's command set and/or API may call it "continue."
If you would like to provide a hint to the user regarding the actual back-end command, do so in the method's docstring:
@REGISTRY.method
def resume(...):
"""Continue execution of the current target (continue)."""
...
There are exceptions:
1. When there is not a one-to-one mapping from the method to an action.
This is usually the case for delete, toggle, refresh, etc.
For these, use the action as the prefix, and then some suffix, usually describing the type of object affected, e.g., delete_breakpoint.
2. When using an "_ext" class of action, e.g., step_ext or break_ext.
There is almost certainly not a one-to-one method for such an action.
The naming convention is the same as 1, but omitting the "_ext", e.g., step_advance or break_event.
Even if you only have one method that maps to step_ext, the method should *never* be called step_ext.
3. There is no corresponding action at all.
In this case, call it what you want, but strive for consistency among related methods in this category for your back-end.
Act as though there could one day be a Ghidra action that you'd like to map them to.
There may be some naming you find annoying, e.g., "resume" (not "continue") or "launch" (not "start").
We also do not use the term "watchpoint." We instead say "write breakpoint."
Thus, the method for placing one is named `break_write_whatever`, not `watch_whatever`.
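Putting the convention together for the watchpoint case, a sketch; the registry object here is a stand-in so the snippet is self-contained, and the real MethodRegistry API differs.
```python
# Sketch only: _Registry is a stand-in for the real MethodRegistry.
class _Registry(object):
    def __init__(self):
        self.methods = {}

    def method(self, action=None):
        def register(func):
            self.methods[func.__name__] = (func, action)
            return func
        return register

REGISTRY = _Registry()

@REGISTRY.method(action='break_write')
def break_write_expression(expression):
    """Set a watchpoint on an expression (watch)."""
    # would issue 'watch <expression>' to the back end and record it into the trace
    pass
```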
# Regarding transactions:
At the moment, I've defined two modes for transaction management on the client side.
The server side couldn't care less. A transaction is a transaction.
For hooks, i.e., things driven by events on the back end, use the client's transaction manager directly.
For commands, i.e., things driven by the user via the CLI, things are a little dicey.
I wouldn't expect the user to manage multiple transaction objects.
The recommendation is that the CLI can have at most one active transaction.
For the user to open a second transaction may be considered an error.
Take care as you're coding (and likely re-using command logic) that you don't accidentally take or otherwise conflict with the CLI's transaction manager when processing an event.
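A sketch of the two modes; `client.start_tx` and the transaction object's API are assumptions for illustration.
```python
# Sketch only: at most one CLI transaction; hooks manage their own.
class CliTxState(object):
    def __init__(self):
        self.tx = None

STATE = CliTxState()

def cmd_start_tx(client, description):
    """CLI command handler: open the one-and-only CLI transaction."""
    if STATE.tx is not None:
        raise RuntimeError("A CLI transaction is already open")
    STATE.tx = client.start_tx(description, undoable=False)

def cmd_end_tx(abort=False):
    """CLI command handler: commit or abort the CLI transaction."""
    tx, STATE.tx = STATE.tx, None
    if tx is None:
        raise RuntimeError("No CLI transaction is open")
    if abort:
        tx.abort()
    else:
        tx.commit()

def on_event(client, description):
    """Hook handler: use its own transaction; never touch STATE.tx."""
    with client.start_tx(description, undoable=False):
        pass  # record the event into the trace here
```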

View File

@ -0,0 +1,56 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
apply from: "${rootProject.projectDir}/gradle/javaProject.gradle"
apply from: "${rootProject.projectDir}/gradle/jacocoProject.gradle"
apply from: "${rootProject.projectDir}/gradle/javaTestProject.gradle"
apply from: "${rootProject.projectDir}/gradle/distributableGhidraModule.gradle"
apply from: "${rootProject.projectDir}/gradle/debugger/hasProtobuf.gradle"
apply from: "${rootProject.projectDir}/gradle/debugger/hasPythonPackage.gradle"
apply plugin: 'eclipse'
eclipse.project.name = 'Debug Debugger-rmi-trace'
dependencies {
api project(':Debugger')
}
task generateProtoPy {
ext.srcdir = file("src/main/proto")
ext.src = fileTree(srcdir) {
include "**/*.proto"
}
ext.outdir = file("build/generated/source/proto/main/py")
outputs.dir(outdir)
inputs.files(src)
dependsOn(configurations.protocArtifact)
doLast {
def exe = configurations.protocArtifact.first()
if (!isCurrentWindows()) {
exe.setExecutable(true)
}
exec {
commandLine exe, "--python_out=$outdir", "-I$srcdir"
args src
}
}
}
tasks.assemblePyPackage {
from(generateProtoPy) {
into "src/ghidratrace"
}
}

View File

@ -0,0 +1,7 @@
##VERSION: 2.0
DEVNOTES.txt||GHIDRA||||END|
Module.manifest||GHIDRA||||END|
src/main/py/LICENSE||GHIDRA||||END|
src/main/py/README.md||GHIDRA||||END|
src/main/py/pyproject.toml||GHIDRA||||END|
src/main/py/tests/EMPTY||GHIDRA||||END|

View File

@ -0,0 +1,48 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.InetSocketAddress;
import java.util.Map;
import java.util.Objects;
import ghidra.app.plugin.core.debug.service.rmi.trace.TraceRmiHandler;
import ghidra.app.plugin.core.debug.service.rmi.trace.TraceRmiPlugin;
import ghidra.app.script.GhidraScript;
import ghidra.app.services.TraceRmiService;
public class ConnectTraceRmiScript extends GhidraScript {
TraceRmiService getService() throws Exception {
TraceRmiService service = state.getTool().getService(TraceRmiService.class);
if (service != null) {
return service;
}
state.getTool().addPlugin(TraceRmiPlugin.class.getName());
return Objects.requireNonNull(state.getTool().getService(TraceRmiService.class));
}
@Override
protected void run() throws Exception {
TraceRmiService service = getService();
TraceRmiHandler handler = service.connect(
new InetSocketAddress(askString("Trace RMI", "hostname", "localhost"), askInt("Trace RMI", "port")));
println("Connected");
handler.start();
// if (askYesNo("Execute?", "Execute 'echo test'?")) {
// handler.getMethods().get("execute").invoke(Map.of("cmd", "script print('test')"));
// }
}
}

View File

@ -0,0 +1,48 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Map;
import java.util.Objects;
import ghidra.app.plugin.core.debug.service.rmi.trace.*;
import ghidra.app.script.GhidraScript;
import ghidra.app.services.TraceRmiService;
public class ListenTraceRmiScript extends GhidraScript {
TraceRmiService getService() throws Exception {
TraceRmiService service = state.getTool().getService(TraceRmiService.class);
if (service != null) {
return service;
}
state.getTool().addPlugin(TraceRmiPlugin.class.getName());
return Objects.requireNonNull(state.getTool().getService(TraceRmiService.class));
}
@Override
protected void run() throws Exception {
TraceRmiService service = getService();
TraceRmiAcceptor acceptor = service.acceptOne(null);
println("Listening at " + acceptor.getAddress());
TraceRmiHandler handler = acceptor.accept();
println("Connection from " + handler.getRemoteAddress());
handler.start();
while (askYesNo("Execute?", "Execute 'echo test'?")) {
handler.getMethods().get("execute").invoke(Map.of("cmd", "echo test"));
}
}
}

View File

@ -0,0 +1,107 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import ghidra.app.plugin.core.debug.service.rmi.trace.TraceRmiHandler.*;
import ghidra.program.model.address.*;
import ghidra.program.model.lang.Register;
import ghidra.rmi.trace.TraceRmi.*;
import ghidra.trace.model.Trace;
import ghidra.trace.model.target.TraceObject;
import ghidra.trace.model.time.TraceSnapshot;
class OpenTrace implements ValueDecoder {
final DoId doId;
final Trace trace;
TraceSnapshot lastSnapshot;
OpenTrace(DoId doId, Trace trace) {
this.doId = doId;
this.trace = trace;
}
public TraceSnapshot createSnapshot(Snap snap, String description) {
TraceSnapshot snapshot = trace.getTimeManager().getSnapshot(snap.getSnap(), true);
snapshot.setDescription(description);
return this.lastSnapshot = snapshot;
}
public TraceObject getObject(long id, boolean required) {
TraceObject object = trace.getObjectManager().getObjectById(id);
if (required && object == null) {
throw new InvalidObjIdError();
}
return object;
}
public TraceObject getObject(ObjPath path, boolean required) {
TraceObject object =
trace.getObjectManager().getObjectByCanonicalPath(TraceRmiHandler.toKeyPath(path));
if (required && object == null) {
throw new InvalidObjPathError();
}
return object;
}
@Override
public TraceObject getObject(ObjDesc desc, boolean required) {
return getObject(desc.getId(), required);
}
@Override
public TraceObject getObject(ObjSpec object, boolean required) {
return switch (object.getKeyCase()) {
case KEY_NOT_SET -> throw new TraceRmiError("Must set id or path");
case ID -> getObject(object.getId(), required);
case PATH -> getObject(object.getPath(), required);
default -> throw new AssertionError();
};
}
public AddressSpace getSpace(String name, boolean required) {
AddressSpace space = trace.getBaseAddressFactory().getAddressSpace(name);
if (required && space == null) {
throw new NoSuchAddressSpaceError();
}
return space;
}
@Override
public Address toAddress(Addr addr, boolean required) {
AddressSpace space = getSpace(addr.getSpace(), required);
return space.getAddress(addr.getOffset());
}
@Override
public AddressRange toRange(AddrRange range, boolean required)
throws AddressOverflowException {
AddressSpace space = getSpace(range.getSpace(), required);
if (space == null) {
return null;
}
Address min = space.getAddress(range.getOffset());
Address max = space.getAddress(range.getOffset() + range.getExtend());
return new AddressRangeImpl(min, max);
}
public Register getRegister(String name, boolean required) {
Register register = trace.getBaseLanguage().getRegister(name);
if (required && register == null) {
throw new InvalidRegisterError(name);
}
return register;
}
}

View File

@ -0,0 +1,70 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import java.util.concurrent.*;
import ghidra.trace.model.target.TraceObject;
import ghidra.util.Swing;
/**
* The future result of invoking a {@link RemoteMethod}.
*
* <p>
* While this can technically result in an object, returning values from remote methods is highly
* discouraged. This has led to several issues in the past, including duplication of information
* (and a lot of it) over the connection. Instead, most methods should just update the trace
* database, and the client can retrieve the relevant information from it. One exception might be
* the {@code execute} method. This is typically for executing a CLI command with captured output.
* There is generally no place for such output to go into the trace, and the use cases for such a
* method to return the output are compelling. For other cases, perhaps the most you can do is
* return a {@link TraceObject}, so that a client can quickly associate the trace changes with the
* method. Otherwise, please return null/void/None for all methods.
*
* <b>NOTE:</b> To avoid the mistake of blocking the Swing thread on an asynchronous result, the
* {@link #get()} methods have been overridden to check for the Swing thread. If invoked on the
* Swing thread with a timeout greater than 1 second, an assertion error will be thrown. Please use
* a non-swing thread, e.g., a task thread or script thread, to wait for results, or chain
* callbacks.
*/
public class RemoteAsyncResult extends CompletableFuture<Object> {
final ValueDecoder decoder;
public RemoteAsyncResult() {
this.decoder = ValueDecoder.DEFAULT;
}
public RemoteAsyncResult(OpenTrace open) {
this.decoder = open;
}
@Override
public Object get() throws InterruptedException, ExecutionException {
if (Swing.isSwingThread()) {
throw new AssertionError("Refusing indefinite wait on Swing thread");
}
return super.get();
}
@Override
public Object get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
if (Swing.isSwingThread() && unit.toSeconds(timeout) > 1) {
throw new AssertionError("Refusing a timeout > 1 second on Swing thread");
}
return super.get(timeout, unit);
}
}

View File

@ -0,0 +1,330 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
import ghidra.async.AsyncUtils;
import ghidra.dbg.target.TargetObject;
import ghidra.dbg.target.schema.*;
import ghidra.dbg.target.schema.TargetObjectSchema.SchemaName;
import ghidra.trace.model.Trace;
import ghidra.trace.model.target.TraceObject;
/**
* A remote method registered by the back-end debugger.
*
* <p>
* Remote methods must describe the parameter names and types at a minimum. They should also
* provide a display name and description for the method itself and each of its parameters. These
* methods should not return a result. Instead, any "result" should be recorded into a trace. The
* invocation can result in an error, which is communicated by an exception that can carry only a
* message string. Only a select few methods should return a result, for example, the {@code execute}
* method with output capture. That output generally does not belong in a trace, so the only way to
* communicate it back to the front end is to return it.
*/
public interface RemoteMethod {
/**
* A "hint" for how to map the method to a common action.
*
* <p>
* Many common commands/actions have varying names across different back-end debuggers. We'd
* like to present common idioms for these common actions, but allow them to keep the names used
* by the back-end, because those names are probably better known to users of that back-end than
* Ghidra's action names are known. The action hints will affect the icon and placement of the
* action in the UI, but the display name will still reflect the name given by the back-end.
* Note that the "stock" action names are not a fixed enumeration. These are just the ones that
* might get special treatment from Ghidra. All methods should appear somewhere (at least, e.g.,
* in context menus for applicable objects), even if the action name is unspecified or does not
* match a stock name. This list may change over time, but that shouldn't matter much. Each
* back-end should make its best effort to match its methods to these stock actions where
* applicable, but ultimately, it is up to the UI to decide what is presented where.
*/
public record Action(String name) {
public static final Action REFRESH = new Action("refresh");
public static final Action ACTIVATE = new Action("activate");
/**
* A weaker form of activate.
*
* <p>
* The user has expressed interest in an object, but has not activated it yet. This is often
* used to communicate selection (i.e., highlight) of the object. Whereas, double-clicking
* or pressing enter would more likely invoke 'activate.'
*/
public static final Action FOCUS = new Action("focus");
public static final Action TOGGLE = new Action("toggle");
public static final Action DELETE = new Action("delete");
/**
* Forms: (cmd:STRING):STRING
*
* Optional arguments: capture:BOOL
*/
public static final Action EXECUTE = new Action("execute");
/**
* Forms: (spec:STRING)
*/
public static final Action CONNECT = new Action("connect");
/**
* Forms: (target:Attachable), (pid:INT), (spec:STRING)
*/
public static final Action ATTACH = new Action("attach");
public static final Action DETACH = new Action("detach");
/**
* Forms: (command_line:STRING), (file:STRING,args:STRING), (file:STRING,args:STRING_ARRAY),
* (ANY*)
*/
public static final Action LAUNCH = new Action("launch");
public static final Action KILL = new Action("kill");
public static final Action RESUME = new Action("resume");
public static final Action INTERRUPT = new Action("interrupt");
/**
* All of these will show in the "step" portion of the control toolbar, if present. The
* difference in each "step_x" is minor. The icon will indicate which form, and the
* positions will be shifted so they appear in a consistent order. The display name is
* determined by the method name, not the action name. For stepping actions that don't fit
* the standards, use {@link #STEP_EXT}. There should be at most one of each standard
* applicable for any given context. (Multiple will appear, but may confuse the user.) You
* can have as many extended step actions as you like. They will be ordered
* lexicographically by name.
*/
public static final Action STEP_INTO = new Action("step_into");
public static final Action STEP_OVER = new Action("step_over");
public static final Action STEP_OUT = new Action("step_out");
/**
* Skip is not typically available, except in emulators. If the back-end debugger does not
* have a command for this action out-of-the-box, we do not recommend trying to implement it
* yourself. The purpose of these actions is just to expose/map each command to the UI, not to
* invent new features for the back-end debugger.
*/
public static final Action STEP_SKIP = new Action("step_skip");
/**
* Step back is not typically available, except in emulators and timeless (or time-travel)
* debuggers.
*/
public static final Action STEP_BACK = new Action("step_back");
/**
* The action for steps that don't fit one of the common stepping actions.
*/
public static final Action STEP_EXT = new Action("step_ext");
/**
* Forms: (addr:ADDRESS), R/W(rng:RANGE), set(expr:STRING)
*
* Optional arguments: condition:STRING, commands:STRING
*/
public static final Action BREAK_SW_EXECUTE = new Action("break_sw_execute");
public static final Action BREAK_HW_EXECUTE = new Action("break_hw_execute");
public static final Action BREAK_READ = new Action("break_read");
public static final Action BREAK_WRITE = new Action("break_write");
public static final Action BREAK_ACCESS = new Action("break_access");
public static final Action BREAK_EXT = new Action("break_ext");
/**
* Forms: (rng:RANGE)
*/
public static final Action READ_MEM = new Action("read_mem");
/**
* Forms: (addr:ADDRESS,data:BYTES)
*/
public static final Action WRITE_MEM = new Action("write_mem");
// NOTE: no read_reg. Use refresh(RegContainer), refresh(RegGroup), refresh(Register)
/**
* Forms: (frame:Frame,name:STRING,value:BYTES), (register:Register,value:BYTES)
*/
public static final Action WRITE_REG = new Action("write_reg");
}
/**
* The name of the method.
*
* @return the name
*/
String name();
/**
* A string that hints at the UI action this method achieves.
*
* @return the action
*/
Action action();
/**
* A description of the method.
*
* <p>
* This is the text for tooltips or other information presented by actions whose purpose is to
* invoke this method. If the back-end command name is well known to its users, this text should
* include that name.
*
* @return the description
*/
String description();
/**
* The method's parameters.
*
* <p>
* Parameters are all keyword-style parameters. This returns a map of names to parameter
* descriptions.
*
* @return the parameter map
*/
Map<String, RemoteParameter> parameters();
/**
* Get the schema for the return type.
*
* <b>NOTE:</b> Most methods should return void, i.e., either they succeed, or they throw/raise
* an error message. One notable exception is "execute," which may return the console output
* from executing a command. In most cases, the method should only cause an update to the trace
* database. That effect is its result.
*
* @return the schema name for the method's return type.
*/
SchemaName retType();
/**
* Check the type of an argument.
*
* <p>
* This is a hack, because {@link TargetObjectSchema} expects {@link TargetObject}, or a
* primitive. We instead need {@link TraceObject}. I'd add the method to the schema, except that
* trace stuff is not in its dependencies.
*
* @param name the name of the parameter
* @param sch the type of the parameter
* @param arg the argument
*/
static void checkType(String name, TargetObjectSchema sch, Object arg) {
if (sch.getType() != TargetObject.class) {
if (sch.getType().isInstance(arg)) {
return;
}
}
else if (arg instanceof TraceObject obj) {
if (sch.equals(obj.getTargetSchema())) {
return;
}
}
throw new IllegalArgumentException(
"For parameter %s: argument %s is not a %s".formatted(name, arg, sch));
}
/**
* Validate the given argument.
*
* <p>
* This method is for checking argument sanity before the arguments are marshalled to the back-end. This
* is called automatically during invocation. Clients can use this method to pre-test or
* validate in the UI, when invocation is not yet desired.
*
* @param arguments the arguments
* @return the trace if any object arguments were given, or null
* @throws IllegalArgumentException if the arguments are not valid
*/
default Trace validate(Map<String, Object> arguments) {
Trace trace = null;
SchemaContext ctx = EnumerableTargetObjectSchema.MinimalSchemaContext.INSTANCE;
for (Map.Entry<String, RemoteParameter> ent : parameters().entrySet()) {
if (!arguments.containsKey(ent.getKey())) {
if (ent.getValue().required()) {
throw new IllegalArgumentException(
"Missing required parameter '" + ent.getKey() + "'");
}
continue; // Should not need to check the default value
}
Object arg = arguments.get(ent.getKey());
if (arg instanceof TraceObject obj) {
if (trace == null) {
trace = obj.getTrace();
ctx = trace.getObjectManager().getRootSchema().getContext();
}
else if (trace != obj.getTrace()) {
throw new IllegalArgumentException(
"All TraceObject parameters must come from the same trace");
}
}
TargetObjectSchema sch = ctx.getSchema(ent.getValue().type());
checkType(ent.getKey(), sch, arg);
}
for (Map.Entry<String, Object> ent : arguments.entrySet()) {
if (!parameters().containsKey(ent.getKey())) {
throw new IllegalArgumentException("Extra argument '" + ent.getKey() + "'");
}
}
return trace;
}
/**
* Invoke the remote method, getting a future result.
*
* <p>
* This invokes the method asynchronously. The returned object is a {@link CompletableFuture},
* whose getters are overridden to prevent blocking the Swing thread for more than 1 second. Use
* of this method is not recommended, if it can be avoided; however, you should not create a
* thread whose sole purpose is to invoke this method. UI actions that need to invoke a remote
* method should do so using this method, but they must be sure to handle errors, e.g., using
* {@link CompletableFuture#exceptionally(Function)}, lest the actions fail silently.
*
* @param arguments the keyword arguments to the remote method
* @return the future result
* @throws IllegalArgumentException if the arguments are not valid
*/
RemoteAsyncResult invokeAsync(Map<String, Object> arguments);
/**
* Invoke the remote method and wait for its completion.
*
* <p>
* This method cannot be invoked from the Swing thread. This is to avoid locking up the user
* interface. If you are on the Swing thread, consider {@link #invokeAsync(Map)} instead. You
* can chain the follow-up actions and then schedule any UI updates on the Swing thread using
* {@link AsyncUtils#SWING_EXECUTOR}.
*
* @param arguments the keyword arguments to the remote method
* @throws IllegalArgumentException if the arguments are not valid
*/
default Object invoke(Map<String, Object> arguments) {
try {
return invokeAsync(arguments).get();
}
catch (InterruptedException | ExecutionException e) {
throw new TraceRmiError(e);
}
}
record RecordRemoteMethod(TraceRmiHandler handler, String name, Action action,
String description, Map<String, RemoteParameter> parameters, SchemaName retType)
implements RemoteMethod {
@Override
public RemoteAsyncResult invokeAsync(Map<String, Object> arguments) {
Trace trace = validate(arguments);
OpenTrace open = handler.getOpenTrace(trace);
return handler.invoke(open, name, arguments);
}
}
}

View File

@ -0,0 +1,50 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import java.util.*;
import ghidra.app.plugin.core.debug.service.rmi.trace.RemoteMethod.Action;
public class RemoteMethodRegistry {
private final Map<String, RemoteMethod> map = new HashMap<>();
private final Map<Action, Set<RemoteMethod>> byAction = new HashMap<>();
protected void add(RemoteMethod method) {
synchronized (map) {
map.put(method.name(), method);
byAction.computeIfAbsent(method.action(), k -> new HashSet<>()).add(method);
}
}
public Map<String, RemoteMethod> all() {
synchronized (map) {
return Map.copyOf(map);
}
}
public RemoteMethod get(String name) {
synchronized (map) {
return map.get(name);
}
}
public Set<RemoteMethod> getByAction(Action action) {
synchronized (map) {
return byAction.getOrDefault(action, Set.of());
}
}
}

View File

@ -0,0 +1,22 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import ghidra.dbg.target.schema.TargetObjectSchema.SchemaName;
public record RemoteParameter(String name, SchemaName type, boolean required,
ValueSupplier defaultValue, String display, String description) {
}

View File

@ -0,0 +1,45 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.SocketAddress;
public class TraceRmiAcceptor extends TraceRmiServer {
public TraceRmiAcceptor(TraceRmiPlugin plugin, SocketAddress address) {
super(plugin, address);
}
@Override
public void start() throws IOException {
socket = new ServerSocket();
bind();
}
@Override
protected void bind() throws IOException {
socket.bind(address, 1);
}
@Override
public TraceRmiHandler accept() throws IOException {
TraceRmiHandler handler = super.accept();
close();
return handler;
}
}

View File

@ -0,0 +1,33 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
public class TraceRmiError extends RuntimeException {
public TraceRmiError() {
}
public TraceRmiError(Throwable cause) {
super(cause);
}
public TraceRmiError(String message) {
super(message);
}
public TraceRmiError(String message, Throwable cause) {
super(message, cause);
}
}

View File

@ -0,0 +1,119 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import java.io.IOException;
import java.net.*;
import ghidra.app.plugin.PluginCategoryNames;
import ghidra.app.plugin.core.debug.DebuggerPluginPackage;
import ghidra.app.plugin.core.debug.event.TraceActivatedPluginEvent;
import ghidra.app.plugin.core.debug.event.TraceClosedPluginEvent;
import ghidra.app.services.TraceRmiService;
import ghidra.framework.plugintool.*;
import ghidra.framework.plugintool.util.PluginStatus;
import ghidra.util.task.ConsoleTaskMonitor;
import ghidra.util.task.TaskMonitor;
@PluginInfo(
shortDescription = "Connect to back-end debuggers via Trace RMI",
description = """
Provides an alternative for connecting to back-end debuggers. The DebuggerModel has
become a bit onerous to implement. Despite its apparent flexibility, the recorder at
the front-end imposes many restrictions, and getting it to work turns into a lot of
guesswork and frustration. Trace RMI should offer a more direct means of recording a
trace from a back-end.
""",
category = PluginCategoryNames.DEBUGGER,
packageName = DebuggerPluginPackage.NAME,
status = PluginStatus.RELEASED,
eventsConsumed = {
TraceActivatedPluginEvent.class,
TraceClosedPluginEvent.class,
},
servicesProvided = {
TraceRmiService.class,
})
public class TraceRmiPlugin extends Plugin implements TraceRmiService {
private static final int DEFAULT_PORT = 15432;
private final TaskMonitor monitor = new ConsoleTaskMonitor();
private SocketAddress serverAddress = new InetSocketAddress("0.0.0.0", DEFAULT_PORT);
private TraceRmiServer server;
public TraceRmiPlugin(PluginTool tool) {
super(tool);
}
public TaskMonitor getTaskMonitor() {
// TODO: Create one in the Debug Console?
return monitor;
}
@Override
public SocketAddress getServerAddress() {
if (server != null) {
// In case serverAddress is ephemeral, get its actual address
return server.getAddress();
}
return serverAddress;
}
@Override
public void setServerAddress(SocketAddress serverAddress) {
if (server != null) {
throw new IllegalStateException("Cannot change server address while it is started");
}
this.serverAddress = serverAddress;
}
@Override
public void startServer() throws IOException {
if (server != null) {
throw new IllegalStateException("Server is already started");
}
server = new TraceRmiServer(this, serverAddress);
server.start();
}
@Override
public void stopServer() {
if (server != null) {
server.close();
}
server = null;
}
@Override
public boolean isServerStarted() {
return server != null;
}
@Override
@SuppressWarnings("resource")
public TraceRmiHandler connect(SocketAddress address) throws IOException {
Socket socket = new Socket();
socket.connect(address);
return new TraceRmiHandler(this, socket);
}
@Override
public TraceRmiAcceptor acceptOne(SocketAddress address) throws IOException {
TraceRmiAcceptor acceptor = new TraceRmiAcceptor(this, address);
acceptor.start();
return acceptor;
}
}

View File

@ -0,0 +1,99 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import java.io.IOException;
import java.net.*;
import ghidra.util.Msg;
public class TraceRmiServer {
protected final TraceRmiPlugin plugin;
protected final SocketAddress address;
protected ServerSocket socket;
public TraceRmiServer(TraceRmiPlugin plugin, SocketAddress address) {
this.plugin = plugin;
this.address = address;
}
protected void bind() throws IOException {
socket.bind(address);
}
public void start() throws IOException {
socket = new ServerSocket();
bind();
new Thread(this::serviceLoop, "trace-rmi server " + socket.getLocalSocketAddress()).start();
}
public void setTimeout(int millis) throws SocketException {
socket.setSoTimeout(millis);
}
/**
* Accept a connection and handle its requests.
*
* <p>
* This launches a new thread to handle the requests. The thread remains alive until the socket
* is closed by either side.
*
* @return the handler
* @throws IOException on error
*/
@SuppressWarnings("resource")
protected TraceRmiHandler accept() throws IOException {
Socket client = socket.accept();
TraceRmiHandler handler = new TraceRmiHandler(plugin, client);
handler.start();
return handler;
}
protected void serviceLoop() {
try {
accept();
}
catch (IOException e) {
if (socket.isClosed()) {
return;
}
Msg.error("Error accepting TraceRmi client", e);
return;
}
finally {
try {
socket.close();
}
catch (IOException e) {
Msg.error("Error closing TraceRmi service", e);
}
}
}
public void close() {
try {
socket.close();
}
catch (IOException e) {
Msg.error("Error closing TraceRmi service", e);
}
}
public SocketAddress getAddress() {
return socket.getLocalSocketAddress();
}
}

View File

@ -0,0 +1,95 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import org.apache.commons.lang3.ArrayUtils;
import ghidra.program.model.address.*;
import ghidra.rmi.trace.TraceRmi.*;
public interface ValueDecoder {
ValueDecoder DEFAULT = new ValueDecoder() {};
default Address toAddress(Addr addr, boolean required) {
if (required) {
throw new IllegalStateException("Address requires a trace for context");
}
return null;
}
default AddressRange toRange(AddrRange range, boolean required)
throws AddressOverflowException {
if (required) {
throw new IllegalStateException("AddressRange requires a trace for context");
}
return null;
}
default Object getObject(ObjSpec spec, boolean required) {
if (required) {
throw new IllegalStateException("TraceObject requires a trace for context");
}
return null;
}
default Object getObject(ObjDesc desc, boolean required) {
if (required) {
throw new IllegalStateException("TraceObject requires a trace for context");
}
return null;
}
default Object toValue(Value value) throws AddressOverflowException {
return switch (value.getValueCase()) {
case NULL_VALUE -> null;
case BOOL_VALUE -> value.getBoolValue();
case BYTE_VALUE -> (byte) value.getByteValue();
case CHAR_VALUE -> (char) value.getCharValue();
case SHORT_VALUE -> (short) value.getShortValue();
case INT_VALUE -> value.getIntValue();
case LONG_VALUE -> value.getLongValue();
case STRING_VALUE -> value.getStringValue();
case BOOL_ARR_VALUE -> ArrayUtils.toPrimitive(
value.getBoolArrValue().getArrList().stream().toArray(Boolean[]::new));
case BYTES_VALUE -> value.getBytesValue().toByteArray();
case CHAR_ARR_VALUE -> value.getCharArrValue().toCharArray();
case SHORT_ARR_VALUE -> ArrayUtils.toPrimitive(
value.getShortArrValue()
.getArrList()
.stream()
.map(Integer::shortValue)
.toArray(Short[]::new));
case INT_ARR_VALUE -> value.getIntArrValue()
.getArrList()
.stream()
.mapToInt(Integer::intValue)
.toArray();
case LONG_ARR_VALUE -> value.getLongArrValue()
.getArrList()
.stream()
.mapToLong(Long::longValue)
.toArray();
case STRING_ARR_VALUE -> value.getStringArrValue()
.getArrList()
.toArray(String[]::new);
case ADDRESS_VALUE -> toAddress(value.getAddressValue(), true);
case RANGE_VALUE -> toRange(value.getRangeValue(), true);
case CHILD_SPEC -> getObject(value.getChildSpec(), true);
case CHILD_DESC -> getObject(value.getChildDesc(), true);
default -> throw new AssertionError("Unrecognized value: " + value);
};
}
}

View File

@ -0,0 +1,22 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.service.rmi.trace;
import ghidra.program.model.address.AddressOverflowException;
public interface ValueSupplier {
Object get(ValueDecoder decoder) throws AddressOverflowException;
}

View File

@ -0,0 +1,51 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.services;
import java.io.IOException;
import java.net.SocketAddress;
import ghidra.app.plugin.core.debug.service.rmi.trace.TraceRmiAcceptor;
import ghidra.app.plugin.core.debug.service.rmi.trace.TraceRmiHandler;
public interface TraceRmiService {
SocketAddress getServerAddress();
/**
* Set the server address and port
*
* @param serverAddress may be null to bind to ephemeral port
*/
void setServerAddress(SocketAddress serverAddress);
void startServer() throws IOException;
void stopServer();
boolean isServerStarted();
TraceRmiHandler connect(SocketAddress address) throws IOException;
/**
* Accept a single connection by listening on the given address
*
* @param address the socket address to bind, or null for ephemeral
* @return the acceptor, which can be used to retrieve the ephemeral address and accept the
* actual connection
* @throws IOException on error
*/
TraceRmiAcceptor acceptOne(SocketAddress address) throws IOException;
}

View File

@ -0,0 +1,525 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package ghidra.rmi.trace;
message FilePath {
string path = 1;
}
message DomObjId {
uint32 id = 1;
}
message TxId {
int32 id = 1;
}
message ObjPath {
string path = 1;
}
message Language {
string id = 1;
}
message Compiler {
string id = 1;
}
message Addr {
string space = 1;
uint64 offset = 2;
}
message AddrRange {
string space = 1;
uint64 offset = 2;
uint64 extend = 3;
}
message Snap {
int64 snap = 1;
}
message Span {
int64 min = 1;
int64 max = 2;
}
message Box {
Span span = 1;
AddrRange range = 2;
}
message ReplyError {
string message = 1;
}
// Trace operations
message RequestCreateTrace {
FilePath path = 1;
Language language = 2;
Compiler compiler = 3;
DomObjId oid = 4;
}
message ReplyCreateTrace {
}
message RequestSaveTrace {
DomObjId oid = 1;
}
message ReplySaveTrace {
}
message RequestCloseTrace {
DomObjId oid = 1;
}
message ReplyCloseTrace {
}
message RequestStartTx {
DomObjId oid = 1;
bool undoable = 2;
string description = 3;
TxId txid = 4;
}
message ReplyStartTx {
}
message RequestEndTx {
DomObjId oid = 1;
TxId txid = 2;
bool abort = 3;
}
message ReplyEndTx {
}
// Memory operations
message RequestCreateOverlaySpace {
DomObjId oid = 1;
string baseSpace = 2;
string name = 3;
}
message ReplyCreateOverlaySpace {
}
enum MemoryState {
MS_UNKNOWN = 0;
MS_KNOWN = 1;
MS_ERROR = 2;
}
message RequestSetMemoryState {
DomObjId oid = 1;
Snap snap = 2;
AddrRange range = 3;
MemoryState state = 4;
}
message ReplySetMemoryState {
}
message RequestPutBytes {
DomObjId oid = 1;
Snap snap = 2;
Addr start = 3;
bytes data = 4;
}
message ReplyPutBytes {
int32 written = 1;
}
message RequestDeleteBytes {
DomObjId oid = 1;
Snap snap = 2;
AddrRange range = 3;
}
message ReplyDeleteBytes {
}
message RegVal {
string name = 1;
bytes value = 2;
}
message RequestPutRegisterValue {
DomObjId oid = 1;
Snap snap = 2;
string space = 3;
repeated RegVal values = 4;
}
message ReplyPutRegisterValue {
repeated string skipped_names = 1;
}
message RequestDeleteRegisterValue {
DomObjId oid = 1;
Snap snap = 2;
string space = 3;
repeated string names = 4;
}
message ReplyDeleteRegisterValue {
}
// Object operations
message ObjSpec {
oneof key {
int64 id = 1;
ObjPath path = 2;
}
}
message ObjDesc {
int64 id = 1;
ObjPath path = 2;
}
message ValSpec {
ObjSpec parent = 1;
Span span = 2;
string key = 3;
Value value = 4;
}
message ValDesc {
ObjDesc parent = 1;
Span span = 2;
string key = 3;
Value value = 4;
}
message Null {
}
message BoolArr {
repeated bool arr = 1;
}
message ShortArr {
repeated int32 arr = 1;
}
message IntArr {
repeated int32 arr = 1;
}
message LongArr {
repeated int64 arr = 1;
}
message StringArr {
repeated string arr = 1;
}
message ValueType {
// Names from schema context
string name = 1;
}
message Value {
oneof value {
Null null_value = 1;
bool bool_value = 2;
int32 byte_value = 3;
uint32 char_value = 4;
int32 short_value = 5;
int32 int_value = 6;
int64 long_value = 7;
string string_value = 8;
BoolArr bool_arr_value = 9;
bytes bytes_value = 10;
string char_arr_value = 11;
ShortArr short_arr_value = 12;
IntArr int_arr_value = 13;
LongArr long_arr_value = 14;
StringArr string_arr_value = 15;
Addr address_value = 16;
AddrRange range_value = 17;
ObjSpec child_spec = 18;
ObjDesc child_desc = 19;
}
}
message RequestCreateRootObject {
DomObjId oid = 1;
string schema_context = 2;
string root_schema = 3;
}
message RequestCreateObject {
DomObjId oid = 1;
ObjPath path = 2;
}
message ReplyCreateObject {
ObjSpec object = 1;
}
enum Resolution {
CR_TRUNCATE = 0;
CR_DENY = 1;
CR_ADJUST = 2;
}
message RequestInsertObject {
DomObjId oid = 1;
ObjSpec object = 2;
Span span = 3;
Resolution resolution = 4;
}
message ReplyInsertObject {
Span span = 1;
}
message RequestRemoveObject {
DomObjId oid = 1;
ObjSpec object = 2;
Span span = 3;
bool tree = 4;
}
message ReplyRemoveObject {
}
message RequestSetValue {
DomObjId oid = 1;
ValSpec value = 2;
Resolution resolution = 3;
}
message ReplySetValue {
Span span = 1;
}
enum ValueKinds {
VK_ELEMENTS = 0;
VK_ATTRIBUTES = 1;
VK_BOTH = 2;
}
message RequestRetainValues {
DomObjId oid = 1;
ObjSpec object = 2;
Span span = 3;
ValueKinds kinds = 4;
repeated string keys = 5;
}
message ReplyRetainValues {
}
message RequestGetObject {
DomObjId oid = 1;
ObjSpec object = 2;
}
message ReplyGetObject {
ObjDesc object = 1;
}
message RequestGetValues {
DomObjId oid = 1;
Span span = 2;
ObjPath pattern = 3;
}
message ReplyGetValues {
repeated ValDesc values = 1;
}
message RequestGetValuesIntersecting {
DomObjId oid = 1;
Box box = 2;
}
// Analysis operations
message RequestDisassemble {
DomObjId oid = 1;
Snap snap = 2;
Addr start = 3;
}
message ReplyDisassemble {
int64 length = 1;
}
// UI operations
message RequestActivate {
DomObjId oid = 1;
ObjSpec object = 2;
}
message ReplyActivate {
}
// Snapshots
message RequestSnapshot {
DomObjId oid = 1;
string description = 2;
string datetime = 3;
Snap snap = 4;
}
message ReplySnapshot {
}
// Client commands
message MethodParameter {
string name = 1;
ValueType type = 2;
bool required = 3;
Value default_value = 4;
string display = 5;
string description = 6;
}
message MethodArgument {
string name = 1;
Value value = 2;
}
message Method {
string name = 1;
string action = 2;
string description = 3;
repeated MethodParameter parameters = 4;
// I'd like to make them all void, but I think executing a command and capturing its output
// justifies being able to return a result. It should be used very sparingly.
ValueType return_type = 5;
}
message RequestNegotiate {
string version = 1;
repeated Method methods = 2;
}
message ReplyNegotiate {
}
message XRequestInvokeMethod {
optional DomObjId oid = 1;
string name = 2;
repeated MethodArgument arguments = 3;
}
message XReplyInvokeMethod {
string error = 1;
Value return_value = 2;
}
// Root
message RootMessage {
oneof msg {
ReplyError error = 1;
RequestNegotiate request_negotiate = 2;
ReplyNegotiate reply_negotiate = 3;
RequestCreateTrace request_create_trace = 4;
ReplyCreateTrace reply_create_trace = 5;
RequestSaveTrace request_save_trace = 6;
ReplySaveTrace reply_save_trace = 7;
RequestCloseTrace request_close_trace = 8;
ReplyCloseTrace reply_close_trace = 9;
RequestStartTx request_start_tx = 10;
ReplyStartTx reply_start_tx = 11;
RequestEndTx request_end_tx = 12;
ReplyEndTx reply_end_tx = 13;
RequestCreateOverlaySpace request_create_overlay = 14;
ReplyCreateOverlaySpace reply_create_overlay = 15;
RequestSetMemoryState request_set_memory_state = 16;
ReplySetMemoryState reply_set_memory_state = 17;
RequestPutBytes request_put_bytes = 18;
ReplyPutBytes reply_put_bytes = 19;
RequestDeleteBytes request_delete_bytes = 20;
ReplyDeleteBytes reply_delete_bytes = 21;
RequestPutRegisterValue request_put_register_value = 22;
ReplyPutRegisterValue reply_put_register_value = 23;
RequestDeleteRegisterValue request_delete_register_value = 24;
ReplyDeleteRegisterValue reply_delete_register_value = 25;
RequestCreateRootObject request_create_root_object = 26;
// Use same reply as CreateObject
RequestCreateObject request_create_object = 27;
ReplyCreateObject reply_create_object = 28;
RequestInsertObject request_insert_object = 29;
ReplyInsertObject reply_insert_object = 30;
RequestRemoveObject request_remove_object = 31;
ReplyRemoveObject reply_remove_object = 32;
RequestSetValue request_set_value = 33;
ReplySetValue reply_set_value = 34;
RequestRetainValues request_retain_values = 35;
ReplyRetainValues reply_retain_values = 36;
RequestGetObject request_get_object = 37;
ReplyGetObject reply_get_object = 38;
RequestGetValues request_get_values = 39;
ReplyGetValues reply_get_values = 40;
RequestGetValuesIntersecting request_get_values_intersecting = 41;
// Reuse reply_get_values
RequestDisassemble request_disassemble = 42;
ReplyDisassemble reply_disassemble = 43;
RequestActivate request_activate = 44;
ReplyActivate reply_activate = 45;
RequestSnapshot request_snapshot = 46;
ReplySnapshot reply_snapshot = 47;
XRequestInvokeMethod xrequest_invoke_method = 48;
XReplyInvokeMethod xreply_invoke_method = 49;
}
}
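For orientation, the sketch below assembles a minimal RequestNegotiate inside a RootMessage using the standard protobuf-java builder pattern. The Java package follows the proto package above, but the generated outer class name depends on the .proto file name and is assumed here to be TraceRmi; the version string and the example method are purely illustrative.

import ghidra.rmi.trace.TraceRmi; // outer class name assumed from the .proto file name

class NegotiateSketch {
	static TraceRmi.RootMessage buildNegotiate() {
		TraceRmi.Method execute = TraceRmi.Method.newBuilder()
				.setName("execute")
				.setDescription("Execute a CLI command (illustrative)")
				.setReturnType(TraceRmi.ValueType.newBuilder().setName("STRING"))
				.build();
		return TraceRmi.RootMessage.newBuilder()
				.setRequestNegotiate(TraceRmi.RequestNegotiate.newBuilder()
						.setVersion("1") // illustrative version string
						.addMethods(execute))
				.build();
	}
}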

View File

@ -0,0 +1,11 @@
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,3 @@
# Ghidra Trace RMI
Python 3 bindings for Ghidra's Trace RMI.

View File

@ -0,0 +1,25 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "ghidratrace"
version = "10.4"
authors = [
{ name="Ghidra Development Team" },
]
description = "Ghidra's TraceRmi for Python3"
readme = "README.md"
requires-python = ">=3.7"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
dependencies = [
"protobuf >= 3, < 4",
]
[project.urls]
"Homepage" = "https://github.com/NationalSecurityAgency/ghidra"
"Bug Tracker" = "https://github.com/NationalSecurityAgency/ghidra/issues"

View File

@ -0,0 +1,15 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

File diff suppressed because it is too large

View File

@ -0,0 +1,47 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from dataclasses import dataclass
# Use instances as type annotations or as schema
@dataclass(frozen=True)
class Schema:
name: str
def __str__(self):
return self.name
ANY = Schema('ANY')
OBJECT = Schema('OBJECT')
VOID = Schema('VOID')
BOOL = Schema('BOOL')
BYTE = Schema('BYTE')
CHAR = Schema('CHAR')
SHORT = Schema('SHORT')
INT = Schema('INT')
LONG = Schema('LONG')
STRING = Schema('STRING')
ADDRESS = Schema('ADDRESS')
RANGE = Schema('RANGE')
BOOL_ARR = Schema('BOOL_ARR')
BYTE_ARR = Schema('BYTE_ARR')
CHAR_ARR = Schema('CHAR_ARR')
SHORT_ARR = Schema('SHORT_ARR')
INT_ARR = Schema('INT_ARR')
LONG_ARR = Schema('LONG_ARR')
STRING_ARR = Schema('STRING_ARR')

View File

@ -0,0 +1,63 @@
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import socket
import traceback
def send_all(s, data):
sent = 0
while sent < len(data):
l = s.send(data[sent:])
if l == 0:
raise Exception("Socket closed")
sent += l
def send_length(s, value):
send_all(s, value.to_bytes(4, 'big'))
def send_delimited(s, msg):
data = msg.SerializeToString()
send_length(s, len(data))
send_all(s, data)
def recv_all(s, size):
buf = b''
while len(buf) < size:
part = s.recv(size - len(buf))
if len(part) == 0:
return buf
buf += part
return buf
#return s.recv(size, socket.MSG_WAITALL)
def recv_length(s):
buf = recv_all(s, 4)
if len(buf) < 4:
raise Exception("Socket closed")
return int.from_bytes(buf, 'big')
def recv_delimited(s, msg, dbg_seq):
size = recv_length(s)
buf = recv_all(s, size)
if len(buf) < size:
raise Exception("Socket closed")
msg.ParseFromString(buf)
return msg
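Since the Java side of Trace RMI must speak the same wire format, here is a hedged Java counterpart of the framing above: a 4-byte big-endian length prefix followed by the serialized protobuf message. Only com.google.protobuf.Message and Parser are real library types; the rest is a sketch, not the handler's actual implementation.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

import com.google.protobuf.Message;
import com.google.protobuf.Parser;

class DelimitedIoSketch {
	static void sendDelimited(Socket s, Message msg) throws IOException {
		byte[] data = msg.toByteArray();
		DataOutputStream out = new DataOutputStream(s.getOutputStream());
		out.writeInt(data.length); // big-endian, matching value.to_bytes(4, 'big') above
		out.write(data);
		out.flush();
	}

	static <T extends Message> T recvDelimited(Socket s, Parser<T> parser) throws IOException {
		DataInputStream in = new DataInputStream(s.getInputStream());
		int len = in.readInt();
		byte[] data = new byte[len];
		in.readFully(data); // throws EOFException if the socket closes early
		return parser.parseFrom(data);
	}
}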

View File

@ -569,7 +569,10 @@ public class DebuggerCoordinates {
}
public TraceProgramView getView() {
return view;
if (trace == null) {
return view; // probably null
}
return view == null ? trace.getProgramView() : view;
}
public long getSnap() {

View File

@ -64,6 +64,9 @@ public class CurrentPlatformTraceDisassembleAction extends DockingAction {
TraceObject object = current.getObject();
DebuggerPlatformMapper mapper =
plugin.platformService.getMapper(trace, object, view.getSnap());
if (mapper == null) {
return null;
}
return new Reqs(mapper, thread, object, view);
}

View File

@ -76,7 +76,7 @@ public class DebuggerDisassemblerPlugin extends Plugin implements PopupActionPro
}
}
protected static RegisterValue deriveAlternativeDefaultContext(Language language,
public static RegisterValue deriveAlternativeDefaultContext(Language language,
LanguageID alternative, Address address) {
LanguageService langServ = DefaultLanguageService.getLanguageService();
Language altLang;

View File

@ -248,6 +248,9 @@ public class DebuggerTrackLocationTrait {
// Change of current frame
// Change of tracking settings
DebuggerCoordinates cur = current;
if (cur.getView() == null) {
return AsyncUtils.nil();
}
TraceThread thread = cur.getThread();
if (thread == null || spec == null) {
return AsyncUtils.nil();

View File

@ -306,7 +306,9 @@ public class ObjectsTreePanel extends JPanel {
return;
}
AbstractNode node = getNode(object.getCanonicalPath());
tree.addSelectionPath(node.getTreePath());
if (node != null) {
tree.addSelectionPath(node.getTreePath());
}
}
public void selectCurrent() {

View File

@ -937,7 +937,7 @@ public class DebuggerRegistersProvider extends ComponentProviderAdapter
}
TraceData getRegisterData(Register register) {
TraceCodeSpace space = getRegisterCodeSpace(false);
TraceCodeSpace space = getRegisterCodeSpace(register.getAddressSpace(), false);
if (space == null) {
return null;
}
@ -1153,14 +1153,24 @@ public class DebuggerRegistersProvider extends ComponentProviderAdapter
return getRegisterMemorySpace(current, space, createIfAbsent);
}
protected TraceCodeSpace getRegisterCodeSpace(boolean createIfAbsent) {
TraceThread curThread = current.getThread();
if (curThread == null) {
protected static TraceCodeSpace getRegisterCodeSpace(DebuggerCoordinates coords,
AddressSpace space, boolean createIfAbsent) {
if (!space.isRegisterSpace()) {
return coords.getTrace()
.getCodeManager()
.getCodeSpace(space, createIfAbsent);
}
TraceThread thread = coords.getThread();
if (thread == null) {
return null;
}
return current.getTrace()
return coords.getTrace()
.getCodeManager()
.getCodeRegisterSpace(curThread, current.getFrame(), createIfAbsent);
.getCodeRegisterSpace(thread, coords.getFrame(), createIfAbsent);
}
protected TraceCodeSpace getRegisterCodeSpace(AddressSpace space, boolean createIfAbsent) {
return getRegisterCodeSpace(current, space, createIfAbsent);
}
protected Set<Register> collectBaseRegistersWithKnownValues(TraceThread thread) {

View File

@ -0,0 +1,64 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.utils;
import java.io.IOException;
import java.lang.ref.Cleaner;
import ghidra.framework.model.DomainFile;
import ghidra.framework.model.DomainObject;
import ghidra.util.exception.CancelledException;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public class ManagedDomainObject implements AutoCloseable {
public static final Cleaner CLEANER = Cleaner.create();
private static class ObjectState implements Runnable {
private DomainObject obj;
@Override
public synchronized void run() {
if (obj.getConsumerList().contains(this)) {
obj.release(this);
}
}
public synchronized DomainObject get() {
if (!obj.getConsumerList().contains(this)) {
throw new IllegalStateException("Domain object is closed");
}
return obj;
}
}
private final ObjectState state = new ObjectState();
public ManagedDomainObject(DomainFile file, boolean okToUpgrade, boolean okToRecover,
TaskMonitor monitor) throws VersionException, CancelledException, IOException {
state.obj = file.getDomainObject(state, okToUpgrade, okToRecover, monitor);
CLEANER.register(this, state);
}
@Override
public void close() throws Exception {
state.run();
}
public DomainObject get() {
return state.get();
}
}
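A brief usage sketch, mirroring openDomainObject in the GDB test support class later in this change; the project and monitor parameters and the trace path are illustrative. If close() is never reached, the registered Cleaner releases the domain object once the wrapper becomes unreachable.

import ghidra.framework.model.DomainFile;
import ghidra.framework.model.Project;
import ghidra.trace.model.Trace;
import ghidra.util.task.TaskMonitor;

class ManagedDomainObjectSketch {
	static void useTrace(Project project, TaskMonitor monitor) throws Exception {
		// Path is illustrative; the GDB tests open "/New Traces/gdb/noname"
		DomainFile df = project.getProjectData().getFile("/New Traces/gdb/noname");
		try (ManagedDomainObject mdo = new ManagedDomainObject(df, false, false, monitor)) {
			Trace trace = (Trace) mdo.get(); // throws IllegalStateException once released
			// ... inspect the trace ...
		}
	}
}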

View File

@ -26,10 +26,9 @@ import ghidra.program.util.ProgramSelection;
/**
* A service providing access to the main listing panel
*/
@ServiceInfo( //
defaultProvider = DebuggerListingPlugin.class, //
description = "Replacement CodeViewerService for Debugger" //
)
@ServiceInfo(
defaultProvider = DebuggerListingPlugin.class,
description = "Replacement CodeViewerService for Debugger")
public interface DebuggerListingService extends CodeViewerService {
/**

View File

@ -30,5 +30,6 @@ void* work(void* param) {
int main() {
pthread_create(&thread, NULL, work, (void*)1);
sleep(1); // Not ideal, but some assurance that we break with two threads
return (int)work(NULL);
}

View File

@ -53,7 +53,6 @@ public interface TargetMemoryRegion extends TargetObject {
* @return true if write is permitted
*/
@TargetAttributeType(name = WRITABLE_ATTRIBUTE_NAME, required = true, hidden = true)
public default boolean isWritable() {
return getTypedAttributeNowByName(WRITABLE_ATTRIBUTE_NAME, Boolean.class, false);
}
@ -64,7 +63,6 @@ public interface TargetMemoryRegion extends TargetObject {
* @return true if execute is permitted
*/
@TargetAttributeType(name = EXECUTABLE_ATTRIBUTE_NAME, required = true, hidden = true)
public default boolean isExecutable() {
return getTypedAttributeNowByName(EXECUTABLE_ATTRIBUTE_NAME, Boolean.class, false);
}

View File

@ -89,7 +89,16 @@ public enum EnumerableTargetObjectSchema implements TargetObjectSchema {
SET_ATTACH_KIND(TargetAttachKindSet.class),
SET_BREAKPOINT_KIND(TargetBreakpointKindSet.class),
SET_STEP_KIND(TargetStepKindSet.class),
EXECUTION_STATE(TargetExecutionState.class);
EXECUTION_STATE(TargetExecutionState.class),
// Additional types supported by the Trace database
CHAR(Character.class, char.class),
BOOL_ARR(boolean[].class),
BYTE_ARR(byte[].class),
CHAR_ARR(char[].class),
SHORT_ARR(short[].class),
INT_ARR(int[].class),
LONG_ARR(long[].class),
STRING_ARR(String[].class);
public static final class MinimalSchemaContext extends DefaultSchemaContext {
public static final SchemaContext INSTANCE = new MinimalSchemaContext();

View File

@ -25,8 +25,7 @@ import org.junit.Test;
import ghidra.dbg.DebugModelConventions.AsyncAccess;
import ghidra.dbg.error.DebuggerModelTerminatingException;
import ghidra.dbg.target.TargetObject;
import ghidra.dbg.target.schema.EnumerableTargetObjectSchema;
import ghidra.dbg.target.schema.TargetObjectSchema;
import ghidra.dbg.target.schema.*;
import ghidra.util.Msg;
public abstract class AbstractDebuggerModelFactoryTest extends AbstractDebuggerModelTest {
@ -78,6 +77,7 @@ public abstract class AbstractDebuggerModelFactoryTest extends AbstractDebuggerM
TargetObjectSchema rootSchema = m.getModel().getRootSchema();
Msg.info(this, rootSchema.getContext());
Msg.info(this, XmlSchemaContext.serialize(rootSchema.getContext()));
assertFalse(rootSchema instanceof EnumerableTargetObjectSchema);
}

View File

@ -108,7 +108,12 @@ public abstract class AbstractPcodeTraceDataAccess implements InternalPcodeTrace
if (hostRange == null) {
return;
}
getMemoryOps(true).setState(snap, toOverlay(hostRange), state);
TraceMemoryOperations ops = getMemoryOps(true);
if (ops == null) {
throw new AssertionError("Cannot get memory operations for writing. " +
"This usually indicates a schema issue.");
}
ops.setState(snap, toOverlay(hostRange), state);
}
@Override
@ -178,7 +183,12 @@ public abstract class AbstractPcodeTraceDataAccess implements InternalPcodeTrace
if (hostStart == null) {
return 0;
}
return getMemoryOps(true).putBytes(snap, toOverlay(hostStart), buf);
TraceMemoryOperations ops = getMemoryOps(true);
if (ops == null) {
throw new AssertionError("Cannot get memory operations for writing. " +
"This usually indicates a schema issue.");
}
return ops.putBytes(snap, toOverlay(hostStart), buf);
}
@Override

View File

@ -131,9 +131,16 @@ public class DBTraceMemoryBufferEntry extends DBAnnotatedObject {
if (compressed) {
decompress();
}
buffer.put((blockNum << DBTraceMemorySpace.BLOCK_SHIFT) + dstOffset, buf.array(),
buf.arrayOffset() + buf.position(), len);
buf.position(buf.position() + len);
int bufOffset = (blockNum << DBTraceMemorySpace.BLOCK_SHIFT) + dstOffset;
if (buf.isReadOnly()) {
byte[] temp = new byte[len];
buf.get(temp);
buffer.put(bufOffset, temp);
}
else {
buffer.put(bufOffset, buf.array(), buf.arrayOffset() + buf.position(), len);
buf.position(buf.position() + len);
}
return len;
}

View File

@ -645,7 +645,7 @@ public class DBTraceMemorySpace
@Override
public int putBytes(long snap, Address start, ByteBuffer buf) {
assertInSpace(start);
int arrOff = buf.arrayOffset() + buf.position();
int pos = buf.position();
try (LockHold hold = LockHold.lock(lock.writeLock())) {
ByteBuffer oldBytes = ByteBuffer.allocate(buf.remaining());
@ -659,7 +659,8 @@ public class DBTraceMemorySpace
doSetState(snap, start, end, TraceMemoryState.KNOWN);
// Read back the written bytes and fire event
byte[] bytes = Arrays.copyOfRange(buf.array(), arrOff, arrOff + result);
byte[] bytes = new byte[result];
buf.get(pos, bytes);
ImmutableTraceAddressSnapRange tasr = new ImmutableTraceAddressSnapRange(start,
start.add(result - 1), snap, lastSnap.snap);
trace.setChanged(new TraceChangeRecord<>(TraceMemoryBytesChangeType.CHANGED,

View File

@ -95,6 +95,10 @@ public abstract class AbstractDBTraceSpaceBasedManager<M extends DBTraceSpaceBas
return space.isRegisterSpace();
}
private boolean isOverlaySpace() {
return space.isOverlaySpace();
}
private Frame frame() {
return new Frame(thread, entry.frameLevel);
}
@ -145,7 +149,7 @@ public abstract class AbstractDBTraceSpaceBasedManager<M extends DBTraceSpaceBas
Map<Frame, TabledSpace> newRegSpaces = new HashMap<>();
Map<AddressSpace, TabledSpace> newMemSpaces = new HashMap<>();
for (TabledSpace ts : getTabledSpaces()) {
if (ts.isRegisterSpace()) {
if (ts.isRegisterSpace() && !ts.isOverlaySpace()) {
newRegSpaces.put(ts.frame(), ts);
}
else {

View File

@ -223,6 +223,8 @@ public class DBTraceObject extends DBAnnotatedObject implements TraceObject {
}
DBTraceObject parent = doCreateCanonicalParentObject();
InternalTraceObjectValue value = parent.setValue(lifespan, path.key(), this, resolution);
// TODO: Should I re-order the recursion, so values are inserted from root to this?
// TODO: Should child lifespans be allowed to exceed the parent's?
DBTraceObjectValPath path = parent.doInsert(lifespan, resolution);
return path.append(value);
}
@ -505,12 +507,39 @@ public class DBTraceObject extends DBAnnotatedObject implements TraceObject {
}
}
protected Lifespan doAdjust(Lifespan span, String key, Object value) {
// Ordered by min, so I only need to consider the first conflict
// If start is contained in an entry, assume the user means to overwrite it.
for (InternalTraceObjectValue val : doGetValues(span, key)) {
if (Objects.equals(value, val.getValue())) {
continue; // not a conflict
}
if (val.getLifespan().contains(span.min())) {
continue; // user probably wants to overwrite the remainder of this entry
}
// Every entry intersects the span, so if we get one, adjust
return span.withMax(val.getMinSnap() - 1);
}
return span;
}
// TODO: Could/should this return Stream instead?
protected Collection<? extends InternalTraceObjectValue> doGetValues(Lifespan span,
String key) {
return doGetValues(span.lmin(), span.lmax(), key);
}
/**
* The implementation of {@link #getValues(Lifespan, String)}
*
* <p>
* This collects entries ordered by min snap
*
* @param lower the lower snap
* @param upper the upper snap
* @param key the key
* @return the collection of values
*/
protected Collection<? extends InternalTraceObjectValue> doGetValues(long lower, long upper,
String key) {
// Collect triplet-indexed values
@ -746,6 +775,9 @@ public class DBTraceObject extends DBAnnotatedObject implements TraceObject {
if (resolution == ConflictResolution.DENY) {
doCheckConflicts(lifespan, key, value);
}
else if (resolution == ConflictResolution.ADJUST) {
lifespan = doAdjust(lifespan, key, value);
}
var setter = new ValueLifespanSetter(lifespan, value) {
DBTraceObject canonicalLifeChanged = null;

View File

@ -23,6 +23,7 @@ import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.collections4.IteratorUtils;
import org.jdom.JDOMException;
import db.*;
@ -192,7 +193,18 @@ public class DBTraceObjectManager implements TraceObjectManager, DBTraceManager
valueStore.getIndex(DBTraceObject.class, DBTraceObjectValue.CHILD_COLUMN);
objectsView = Collections.unmodifiableCollection(objectStore.asMap().values());
valuesView = Collections.unmodifiableCollection(valueStore.asMap().values());
valuesView = new AbstractCollection<>() {
@Override
public Iterator<TraceObjectValue> iterator() {
return IteratorUtils.chainedIterator(valueStore.asMap().values().iterator(),
rangeValueMap.values().iterator());
}
@Override
public int size() {
return valueStore.getRecordCount() + rangeValueMap.size();
}
};
}
protected void loadRootSchema() {

View File

@ -135,6 +135,9 @@ interface InternalTraceObjectValue extends TraceObjectValue {
if (resolution == ConflictResolution.DENY) {
getParent().doCheckConflicts(lifespan, getEntryKey(), getValue());
}
else if (resolution == ConflictResolution.ADJUST) {
lifespan = getParent().doAdjust(lifespan, getEntryKey(), getValue());
}
new ValueLifespanSetter(lifespan, getValue(), this) {
@Override
protected Iterable<InternalTraceObjectValue> getIntersecting(Long lower,
@ -151,7 +154,8 @@ interface InternalTraceObjectValue extends TraceObjectValue {
}.set(lifespan, getValue());
if (isObject()) {
DBTraceObject child = getChild();
child.emitEvents(new TraceChangeRecord<>(TraceObjectChangeType.LIFE_CHANGED, null, child));
child.emitEvents(
new TraceChangeRecord<>(TraceObjectChangeType.LIFE_CHANGED, null, child));
}
}
}

View File

@ -21,7 +21,6 @@ import ghidra.program.model.lang.Register;
import ghidra.program.model.util.CodeUnitInsertionException;
import ghidra.trace.model.Lifespan;
import ghidra.trace.model.guest.TracePlatform;
import ghidra.trace.util.TraceRegisterUtils;
/**
* A view of defined data units
@ -74,8 +73,8 @@ public interface TraceDefinedDataView extends TraceBaseDefinedUnitsView<TraceDat
*/
default TraceData create(Lifespan lifespan, Register register, DataType dataType)
throws CodeUnitInsertionException {
TraceRegisterUtils.requireByteBound(register);
return create(lifespan, register.getAddress(), dataType, register.getNumBytes());
return create(getTrace().getPlatformManager().getHostPlatform(), lifespan, register,
dataType);
}
/**

View File

@ -47,6 +47,13 @@ public interface TraceObject extends TraceUniqueObject {
*/
Trace getTrace();
/**
* Get the database key for this object
*
* @return the key
*/
long getKey();
/**
* Get the root of the tree containing this object
*
@ -171,7 +178,11 @@ public interface TraceObject extends TraceUniqueObject {
* Throw {@link DuplicateKeyException} if the specified lifespan would result in conflicting
* entries
*/
DENY;
DENY,
/**
* Adjust the new entry to fit into the span available, possibly ignoring it altogether
*/
ADJUST;
}
/**

View File

@ -48,11 +48,19 @@ public interface TraceObjectInterface {
@Transitional
default long computeMinSnap() {
return computeSpan().lmin();
Lifespan span = computeSpan();
if (span == null) {
return 0;
}
return span.lmin();
}
@Transitional
default long computeMaxSnap() {
return computeSpan().lmax();
Lifespan span = computeSpan();
if (span == null) {
return 0;
}
return span.lmax();
}
}

View File

@ -61,6 +61,17 @@ public interface TraceObjectValue {
*/
Object getValue();
/**
* A convenience to get and cast the value, without checking
*
* @param <T> the desired type
* @return the value
*/
@SuppressWarnings("unchecked")
default <T> T castValue() {
return (T) getValue();
}
/**
* Get the value as an object
*

View File

@ -26,11 +26,13 @@ import java.nio.charset.CharsetEncoder;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collection;
import java.util.List;
import db.Transaction;
import db.DBHandle;
import db.Transaction;
import generic.theme.GThemeDefaults.Colors.Messages;
import ghidra.app.plugin.processors.sleigh.SleighLanguage;
import ghidra.dbg.util.PathPredicates;
import ghidra.pcode.exec.*;
import ghidra.pcode.exec.trace.TraceSleighUtils;
import ghidra.program.disassemble.Disassembler;
@ -52,6 +54,7 @@ import ghidra.trace.model.*;
import ghidra.trace.model.guest.TraceGuestPlatform;
import ghidra.trace.model.guest.TracePlatform;
import ghidra.trace.model.symbol.TraceReferenceManager;
import ghidra.trace.model.target.*;
import ghidra.trace.model.thread.TraceThread;
import ghidra.util.Msg;
import ghidra.util.database.DBOpenMode;
@ -750,6 +753,60 @@ public class ToyDBTraceBuilder implements AutoCloseable {
return getLanguage(langID).getCompilerSpecByID(new CompilerSpecID(compID));
}
/**
* Get an object by its canonical path
*
* @param canonicalPath the canonical path
* @return the object or null
*/
public TraceObject obj(String canonicalPath) {
return trace.getObjectManager()
.getObjectByCanonicalPath(TraceObjectKeyPath.parse(canonicalPath));
}
/**
* Get an object by its path pattern
*
* @param pat the path pattern
* @return the object or null
*/
public TraceObject objAny(String pat) {
return objAny(pat, Lifespan.at(0));
}
public TraceObject objAny(String path, Lifespan span) {
return trace.getObjectManager().getObjectsByPath(span, TraceObjectKeyPath.parse(path))
.findFirst()
.orElse(null);
}
/**
* Get the value (not value entry) of an object
*
* @param obj the object
* @param snap the snapshot key
* @param key the entry key
* @return the value, possibly null
*/
public Object objValue(TraceObject obj, long snap, String key) {
TraceObjectValue value = obj.getValue(snap, key);
return value == null ? null : value.getValue();
}
/**
* List all values matching the given pattern at the given snap.
*
* @param snap the snap
* @param pattern the pattern
* @return the list of values
*/
public List<Object> objValues(long snap, String pattern) {
return trace.getObjectManager()
.getValuePaths(Lifespan.at(snap), PathPredicates.parse(pattern))
.map(p -> p.getDestinationValue(trace.getObjectManager().getRootObject()))
.toList();
}
@Override
public void close() {
if (trace.getConsumerList().contains(this)) {

View File

@ -929,6 +929,7 @@ public class DBCachedObjectStoreFactory {
PrimitiveCodec<String[]> STRING_ARR =
new ArrayObjectCodec<>(new LengthBoundCodec<>(STRING));
// TODO: No floats?
Map<Byte, PrimitiveCodec<?>> CODECS_BY_SELECTOR = Stream
.of(BOOL, BYTE, CHAR, SHORT, INT, LONG, STRING, BOOL_ARR, BYTE_ARR, CHAR_ARR,
SHORT_ARR, INT_ARR, LONG_ARR, STRING_ARR)

View File

@ -0,0 +1,501 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.gdb.rmi;
import static org.hamcrest.Matchers.startsWith;
import static org.junit.Assert.*;
import java.io.*;
import java.net.*;
import java.nio.file.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import ghidra.app.plugin.core.debug.gui.AbstractGhidraHeadedDebuggerGUITest;
import ghidra.app.plugin.core.debug.service.rmi.trace.*;
import ghidra.app.plugin.core.debug.utils.ManagedDomainObject;
import ghidra.app.services.TraceRmiService;
import ghidra.dbg.target.TargetExecutionStateful.TargetExecutionState;
import ghidra.dbg.testutil.DummyProc;
import ghidra.framework.TestApplicationUtils;
import ghidra.framework.main.ApplicationLevelOnlyPlugin;
import ghidra.framework.model.DomainFile;
import ghidra.framework.plugintool.Plugin;
import ghidra.framework.plugintool.util.*;
import ghidra.program.model.address.Address;
import ghidra.program.model.address.AddressRangeImpl;
import ghidra.trace.model.breakpoint.TraceBreakpointKind;
import ghidra.trace.model.breakpoint.TraceBreakpointKind.TraceBreakpointKindSet;
import ghidra.trace.model.target.*;
import ghidra.util.Msg;
import ghidra.util.NumericUtilities;
public abstract class AbstractGdbTraceRmiTest extends AbstractGhidraHeadedDebuggerGUITest {
// Connecting should be the first thing the script does, so use a tight timeout.
protected static final int CONNECT_TIMEOUT_MS = 3000;
protected static final int TIMEOUT_SECONDS = 300;
protected static final int QUIT_TIMEOUT_MS = 1000;
public static final String INSTRUMENT_STOPPED = """
ghidra trace tx-open "Fake" 'ghidra trace create-obj Inferiors[1]'
define do-set-stopped
ghidra trace set-value Inferiors[1] _state '"STOPPED"'
end
define set-stopped
ghidra trace tx-open Stopped do-set-stopped
end
python gdb.events.stop.connect(lambda e: gdb.execute("set-stopped"))""";
public static final String INSTRUMENT_RUNNING = """
ghidra trace tx-open "Fake" 'ghidra trace create-obj Inferiors[1]'
define do-set-running
ghidra trace set-value Inferiors[1] _state '"RUNNING"'
end
define set-running
ghidra trace tx-open Running do-set-running
end
python gdb.events.cont.connect(lambda e: gdb.execute("set-running"))""";
protected TraceRmiService traceRmi;
private Path gdbPath;
private Path outFile;
private Path errFile;
@BeforeClass
public static void setupPython() throws Throwable {
new ProcessBuilder("gradle", "Debugger-agent-gdb:installPyPackage")
.directory(TestApplicationUtils.getInstallationDirectory())
.inheritIO()
.start()
.waitFor();
}
protected Path getGdbPath() {
return Paths.get(DummyProc.which("gdb"));
}
@Before
public void setupTraceRmi() throws Throwable {
traceRmi = addPlugin(tool, TraceRmiPlugin.class);
gdbPath = getGdbPath();
outFile = Files.createTempFile("gdbout", null);
errFile = Files.createTempFile("gdberr", null);
}
protected void addAllDebuggerPlugins() throws PluginException {
PluginsConfiguration plugConf = new PluginsConfiguration() {
@Override
protected boolean accepts(Class<? extends Plugin> pluginClass) {
return !ApplicationLevelOnlyPlugin.class.isAssignableFrom(pluginClass);
}
};
for (PluginDescription pd : plugConf
.getPluginDescriptions(PluginPackage.getPluginPackage("Debugger"))) {
addPlugin(tool, pd.getPluginClass());
}
}
protected static String addrToStringForGdb(InetAddress address) {
if (address.isAnyLocalAddress()) {
return "127.0.0.1"; // Can't connect to 0.0.0.0 as such. Choose localhost.
}
return address.getHostAddress();
}
protected static String sockToStringForGdb(SocketAddress address) {
if (address instanceof InetSocketAddress tcp) {
return addrToStringForGdb(tcp.getAddress()) + ":" + tcp.getPort();
}
throw new AssertionError("Unhandled address type " + address);
}
protected record GdbResult(boolean timedOut, int exitCode, String stdout, String stderr) {
protected String handle() {
if (!"".equals(stderr) | 0 != exitCode) {
throw new GdbError(exitCode, stdout, stderr);
}
return stdout;
}
}
protected record ExecInGdb(Process gdb, CompletableFuture<GdbResult> future) {
}
@SuppressWarnings("resource") // Do not close stdin
protected ExecInGdb execInGdb(String script) throws IOException {
ProcessBuilder pb = new ProcessBuilder(gdbPath.toString());
// If commands come from file, GDB will quit after EOF.
Msg.info(this, "outFile: " + outFile);
Msg.info(this, "errFile: " + errFile);
pb.redirectInput(ProcessBuilder.Redirect.PIPE);
pb.redirectOutput(outFile.toFile());
pb.redirectError(errFile.toFile());
Process gdbProc = pb.start();
OutputStream stdin = gdbProc.getOutputStream();
stdin.write(script.getBytes());
stdin.flush();
return new ExecInGdb(gdbProc, CompletableFuture.supplyAsync(() -> {
try {
if (!gdbProc.waitFor(TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
Msg.error(this, "Timed out waiting for GDB");
gdbProc.destroyForcibly();
gdbProc.waitFor(TIMEOUT_SECONDS, TimeUnit.SECONDS);
return new GdbResult(true, -1, Files.readString(outFile),
Files.readString(errFile));
}
Msg.info(this, "GDB exited with code " + gdbProc.exitValue());
return new GdbResult(false, gdbProc.exitValue(), Files.readString(outFile),
Files.readString(errFile));
}
catch (Exception e) {
return ExceptionUtils.rethrow(e);
}
finally {
gdbProc.destroyForcibly();
}
}));
}
protected static class GdbError extends RuntimeException {
public final int exitCode;
public final String stdout;
public final String stderr;
public GdbError(int exitCode, String stdout, String stderr) {
super("""
exitCode=%d:
----stdout----
%s
----stderr----
%s
""".formatted(exitCode, stdout, stderr));
this.exitCode = exitCode;
this.stdout = stdout;
this.stderr = stderr;
}
}
protected String runThrowError(String script) throws Exception {
CompletableFuture<GdbResult> result = execInGdb(script).future;
return result.get(TIMEOUT_SECONDS, TimeUnit.SECONDS).handle();
}
protected record GdbAndHandler(ExecInGdb exec, TraceRmiHandler handler)
implements AutoCloseable {
protected RemoteMethod getMethod(String name) {
return Objects.requireNonNull(handler.getMethods().get(name));
}
public void execute(String cmd) {
RemoteMethod execute = getMethod("execute");
execute.invoke(Map.of("cmd", cmd));
}
public RemoteAsyncResult executeAsync(String cmd) {
RemoteMethod execute = getMethod("execute");
return execute.invokeAsync(Map.of("cmd", cmd));
}
public String executeCapture(String cmd) {
RemoteMethod execute = getMethod("execute");
return (String) execute.invoke(Map.of("cmd", cmd, "to_string", true));
}
@Override
public void close() throws Exception {
Msg.info(this, "Cleaning up gdb");
try {
try {
RemoteAsyncResult asyncQuit = executeAsync("quit");
try {
asyncQuit.get(QUIT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
}
catch (TimeoutException e) {
/**
* This seems like a bug in gdb. AFAICT, it's a rehash or regression of
* https://sourceware.org/bugzilla/show_bug.cgi?id=17247. If I attach to the
* hung gdb, I get a similar stack trace, but with Python frames on the
* stack. The workaround given in the comments works here, too. I hesitate
* to point fingers, though, because I'm testing with a modern gdb-13.1
* compiled from source on a rather un-modern distro.
*/
Msg.warn(this, "gdb hung on quit. Sending SIGCONT.");
Runtime.getRuntime().exec("kill -SIGCONT %d".formatted(exec.gdb.pid()));
asyncQuit.get(QUIT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
}
}
catch (TraceRmiError e) {
// expected
}
catch (ExecutionException e) {
if (!(e.getCause() instanceof TraceRmiError)) {
throw e;
}
}
GdbResult r = exec.future.get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
r.handle();
waitForPass(() -> assertTrue(handler.isClosed()));
}
finally {
exec.gdb.destroyForcibly();
}
}
}
protected GdbAndHandler startAndConnectGdb(Function<String, String> scriptSupplier)
throws Exception {
TraceRmiAcceptor acceptor = traceRmi.acceptOne(null);
ExecInGdb exec = execInGdb(scriptSupplier.apply(sockToStringForGdb(acceptor.getAddress())));
acceptor.setTimeout(CONNECT_TIMEOUT_MS);
try {
TraceRmiHandler handler = acceptor.accept();
return new GdbAndHandler(exec, handler);
}
catch (SocketTimeoutException e) {
exec.gdb.destroyForcibly();
exec.future.get(TIMEOUT_SECONDS, TimeUnit.SECONDS).handle();
throw e;
}
}
protected GdbAndHandler startAndConnectGdb() throws Exception {
return startAndConnectGdb(addr -> """
set python print-stack full
python import ghidragdb
ghidra trace connect %s
""".formatted(addr));
}
@SuppressWarnings("resource")
protected String runThrowError(Function<String, String> scriptSupplier)
throws Exception {
GdbAndHandler conn = startAndConnectGdb(scriptSupplier);
GdbResult r = conn.exec.future.get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
String stdout = r.handle();
waitForPass(() -> assertTrue(conn.handler.isClosed()));
return stdout;
}
protected void waitState(int infnum, Supplier<Long> snapSupplier, TargetExecutionState state) {
TraceObjectKeyPath infPath = TraceObjectKeyPath.parse("Inferiors").index(infnum);
TraceObject inf =
Objects.requireNonNull(tb.trace.getObjectManager().getObjectByCanonicalPath(infPath));
waitForPass(
() -> assertEquals(state.name(), tb.objValue(inf, snapSupplier.get(), "_state")));
waitTxDone();
}
protected void waitStopped() {
waitState(1, () -> 0L, TargetExecutionState.STOPPED);
}
protected void waitRunning() {
waitState(1, () -> 0L, TargetExecutionState.RUNNING);
}
protected String extractOutSection(String out, String head) {
return out.split(head)[1].split("---")[0].replace("(gdb)", "").trim();
}
record MemDump(long address, byte[] data) {
}
protected MemDump parseHexDump(String dump) throws IOException {
// First, get the address. Assume contiguous, so only need top line.
List<String> lines = List.of(dump.split("\n"));
List<String> toksLine0 = List.of(lines.get(0).split("\\s+"));
assertThat(toksLine0.get(0), startsWith("0x"));
long address = Long.decode(toksLine0.get(0));
ByteArrayOutputStream buf = new ByteArrayOutputStream();
for (String l : lines) {
List<String> parts = List.of(l.split(":"));
assertEquals(2, parts.size());
String hex = parts.get(1).replaceAll("\\s*0x", "");
byte[] lineData = NumericUtilities.convertStringToBytes(hex);
assertNotNull("Converted to null: " + hex, lineData);
buf.write(lineData);
}
return new MemDump(address, buf.toByteArray());
}
record RegDump() {
}
protected RegDump parseRegDump(String dump) {
return new RegDump();
}
protected ManagedDomainObject openDomainObject(String path) throws Exception {
DomainFile df = env.getProject().getProjectData().getFile(path);
assertNotNull(df);
return new ManagedDomainObject(df, false, false, monitor);
}
protected ManagedDomainObject waitDomainObject(String path) throws Exception {
DomainFile df;
long start = System.currentTimeMillis();
while (true) {
df = env.getProject().getProjectData().getFile(path);
if (df != null) {
return new ManagedDomainObject(df, false, false, monitor);
}
Thread.sleep(1000);
if (System.currentTimeMillis() - start > 30000) {
throw new TimeoutException("30 seconds expired waiting for domain file");
}
}
}
protected void assertBreakLoc(TraceObjectValue locVal, String key, Address addr, int len,
Set<TraceBreakpointKind> kinds, String expression) throws Exception {
assertEquals(key, locVal.getEntryKey());
TraceObject loc = locVal.getChild();
TraceObject spec = loc.getCanonicalParent(0).getParent();
assertEquals(new AddressRangeImpl(addr, len), loc.getValue(0, "_range").getValue());
assertEquals(TraceBreakpointKindSet.encode(kinds), spec.getValue(0, "_kinds").getValue());
assertEquals(expression, spec.getValue(0, "_expression").getValue());
}
protected void waitTxDone() {
waitFor(() -> tb.trace.getCurrentTransactionInfo() == null);
}
private record Cut(String head, int begin, int end) {
String parseCell(String line) {
int begin = Math.min(line.length(), this.begin);
int end = Math.min(line.length(), this.end);
/**
* NOTE: Do not assert previous char is space.
*
* When the breakpoints table spells out locations, the Address and What cells are
* indented and no longer align with their column headers.
*/
return line.substring(begin, end).trim();
}
}
protected record Row(Map<String, String> cells) {
private static Row parse(List<Cut> cuts, String line) {
return new Row(
cuts.stream().collect(Collectors.toMap(Cut::head, c -> c.parseCell(line))));
}
public String getCell(String head) {
return cells.get(head);
}
}
protected record Tabular(List<String> headings, List<Row> rows) {
static final Pattern SPACES = Pattern.compile(" *");
static final Pattern WORDS = Pattern.compile("\\w+");
private static List<Cut> findCuts(String header) {
List<Cut> result = new ArrayList<>();
Matcher spaceMatcher = SPACES.matcher(header);
Matcher wordMatcher = WORDS.matcher(header);
int start = 0;
while (start < header.length()) {
if (!spaceMatcher.find(start)) {
throw new AssertionError();
}
start = spaceMatcher.end();
if (start >= header.length()) {
break;
}
if (!wordMatcher.find(start)) {
throw new AssertionError();
}
result.add(new Cut(wordMatcher.group(), wordMatcher.start(), wordMatcher.end()));
start = wordMatcher.end();
}
return result;
}
private static List<Cut> adjustCuts(List<Cut> cuts) {
List<Cut> result = new ArrayList<>();
for (int i = 0; i < cuts.size(); i++) {
Cut cut = cuts.get(i);
int j = i + 1;
int end = j < cuts.size() ? cuts.get(j).begin : Integer.MAX_VALUE;
result.add(new Cut(cut.head, cut.begin, end));
}
return result;
}
/**
* Parse a table.
*
* <p>
* This is far from perfect, but good enough for making assertions in tests. For example, in
* the breakpoints table, gdb may insert an extra informational line under a breakpoint row.
* This line will get mangled and parsed as if it were an entry. However, its "Num" cell
* will be empty, so it is unlikely to interfere.
*
* @param out the output in tabular form
* @return the table object, more or less
*/
public static Tabular parse(String out) {
List<String> lines = List.of(out.split("\n"));
if (lines.isEmpty()) {
throw new AssertionError("Output is not tabular");
}
List<Cut> cuts = adjustCuts(findCuts(lines.get(0)));
return new Tabular(cuts.stream().map(Cut::head).toList(),
lines.stream().skip(1).map(l -> Row.parse(cuts, l)).toList());
}
public Row findRow(String head, String contents) {
return rows.stream()
.filter(r -> Objects.equals(contents, r.getCell(head)))
.findFirst()
.orElse(null);
}
}
public static void waitForPass(Runnable runnable, long timeoutMs, long retryDelayMs) {
long start = System.currentTimeMillis();
AssertionError lastError = null;
while (System.currentTimeMillis() - start < timeoutMs) {
try {
runnable.run();
return;
}
catch (AssertionError e) {
lastError = e;
}
try {
Thread.sleep(retryDelayMs);
}
catch (InterruptedException e) {
// Retry sooner, I guess.
}
}
if (lastError == null) {
throw new AssertionError("Timed out before first try?");
}
throw lastError;
}
}
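A short usage sketch for the Tabular and Row helpers above, as it might appear in a test extending this class. The gdb command and the column names ("Num", "Type") follow gdb's typical "info breakpoints" output and are illustrative; they are not asserted by this change.

// A method one might add to a test extending AbstractGdbTraceRmiTest
// (assertEquals via a static org.junit.Assert import in that test):
void assertBreakpointListed(GdbAndHandler conn) {
	Tabular table = Tabular.parse(conn.executeCapture("info breakpoints"));
	Row row = table.findRow("Num", "1");             // the row whose Num cell is "1"
	assertEquals("breakpoint", row.getCell("Type")); // column names are illustrative
}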

File diff suppressed because it is too large

View File

@ -0,0 +1,425 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.gdb.rmi;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.*;
import java.nio.ByteBuffer;
import java.util.List;
import org.junit.Ignore;
import org.junit.Test;
import agent.gdb.model.GdbLinuxSpecimen;
import ghidra.app.plugin.core.debug.utils.ManagedDomainObject;
import ghidra.dbg.target.TargetExecutionStateful.TargetExecutionState;
import ghidra.dbg.testutil.DummyProc;
import ghidra.dbg.util.PathPattern;
import ghidra.dbg.util.PathPredicates;
import ghidra.program.model.address.AddressSpace;
import ghidra.trace.database.ToyDBTraceBuilder;
import ghidra.trace.model.Trace;
import ghidra.trace.model.memory.TraceMemorySpace;
import ghidra.trace.model.target.TraceObject;
import ghidra.trace.model.time.TraceSnapshot;
public class GdbHooksTest extends AbstractGdbTraceRmiTest {
private static final long RUN_TIMEOUT_MS = 20000;
private static final long RETRY_MS = 500;
record GdbAndTrace(GdbAndHandler conn, ManagedDomainObject mdo) implements AutoCloseable {
public void execute(String cmd) {
conn.execute(cmd);
}
public String executeCapture(String cmd) {
return conn.executeCapture(cmd);
}
@Override
public void close() throws Exception {
conn.close();
mdo.close();
}
}
@SuppressWarnings("resource")
protected GdbAndTrace startAndSyncGdb() throws Exception {
GdbAndHandler conn = startAndConnectGdb();
try {
// TODO: Why does using 'set arch' cause a hang at quit?
conn.execute("""
set ghidra-language x86:LE:64:default
ghidra trace start
ghidra trace sync-enable""");
ManagedDomainObject mdo = waitDomainObject("/New Traces/gdb/noname");
tb = new ToyDBTraceBuilder((Trace) mdo.get());
return new GdbAndTrace(conn, mdo);
}
catch (Exception e) {
conn.close();
throw e;
}
}
@Test
public void testOnNewInferior() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("add-inferior");
waitForPass(() -> assertEquals(2, tb.objValues(0, "Inferiors[]").size()));
}
}
protected String getIndex(TraceObject object, String pattern) {
if (object == null) {
return null;
}
PathPattern pat = PathPredicates.parse(pattern).getSingletonPattern();
if (pat.countWildcards() != 1) {
throw new IllegalArgumentException("Exactly one wildcard required");
}
List<String> path = object.getCanonicalPath().getKeyList();
if (path.size() < pat.asPath().size()) {
return null;
}
List<String> matched = pat.matchKeys(path.subList(0, pat.asPath().size()));
if (matched == null) {
return null;
}
return matched.get(0);
}
protected String inferiorIndex(TraceObject object) {
return getIndex(object, "Inferiors[]");
}
@Test
public void testOnInferiorSelected() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
traceManager.openTrace(tb.trace);
// Both inferiors must have sync enabled
conn.execute("""
add-inferior
inferior 2
ghidra trace sync-enable""");
conn.execute("inferior 1");
waitForPass(() -> assertEquals("1", inferiorIndex(traceManager.getCurrentObject())));
conn.execute("inferior 2");
waitForPass(() -> assertEquals("2", inferiorIndex(traceManager.getCurrentObject())));
conn.execute("inferior 1");
waitForPass(() -> assertEquals("1", inferiorIndex(traceManager.getCurrentObject())));
}
}
@Test
public void testOnInferiorDeleted() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("add-inferior");
waitForPass(() -> assertEquals(2, tb.objValues(0, "Inferiors[]").size()));
conn.execute("remove-inferior 2");
waitForPass(() -> assertEquals(1, tb.objValues(0, "Inferiors[]").size()));
}
}
protected long lastSnap(GdbAndTrace conn) {
return conn.conn.handler().getLastSnapshot(tb.trace);
}
@Test
public void testOnNewThread() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("""
file %s
break work
start""".formatted(GdbLinuxSpecimen.CLONE_EXIT.getCommandLine()));
waitForPass(() -> {
TraceObject inf = tb.obj("Inferiors[1]");
assertNotNull(inf);
assertEquals("STOPPED", tb.objValue(inf, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
waitForPass(() -> assertEquals(1,
tb.objValues(lastSnap(conn), "Inferiors[1].Threads[]").size()),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("continue");
waitForPass(() -> assertEquals(2,
tb.objValues(lastSnap(conn), "Inferiors[1].Threads[]").size()),
RUN_TIMEOUT_MS, RETRY_MS);
}
}
protected String threadIndex(TraceObject object) {
return getIndex(object, "Inferiors[1].Threads[]");
}
@Test
public void testOnThreadSelected() throws Exception {
String cloneExit = DummyProc.which("expCloneExit");
try (GdbAndTrace conn = startAndSyncGdb()) {
traceManager.openTrace(tb.trace);
conn.execute("""
file %s
break work
run""".formatted(cloneExit));
waitForPass(() -> {
TraceObject inf = tb.obj("Inferiors[1]");
assertNotNull(inf);
assertEquals("STOPPED", tb.objValue(inf, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
waitForPass(() -> assertEquals(2,
tb.objValues(lastSnap(conn), "Inferiors[1].Threads[]").size()),
RUN_TIMEOUT_MS, RETRY_MS);
// Now the real test
conn.execute("thread 1");
waitForPass(() -> assertEquals("1", threadIndex(traceManager.getCurrentObject())));
conn.execute("thread 2");
waitForPass(() -> assertEquals("2", threadIndex(traceManager.getCurrentObject())));
conn.execute("thread 1");
waitForPass(() -> assertEquals("1", threadIndex(traceManager.getCurrentObject())));
}
}
protected String frameIndex(TraceObject object) {
return getIndex(object, "Inferiors[1].Threads[1].Stack[]");
}
@Test
public void testOnFrameSelected() throws Exception {
String stack = DummyProc.which("expStack");
try (GdbAndTrace conn = startAndSyncGdb()) {
traceManager.openTrace(tb.trace);
conn.execute("""
file %s
break break_here
run""".formatted(stack));
waitForPass(() -> assertThat(
tb.objValues(lastSnap(conn), "Inferiors[1].Threads[1].Stack[]").size(),
greaterThan(2)),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("frame 1");
waitForPass(() -> assertEquals("1", frameIndex(traceManager.getCurrentObject())),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("frame 0");
waitForPass(() -> assertEquals("0", frameIndex(traceManager.getCurrentObject())),
RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
@Ignore
public void testOnSyscallMemory() throws Exception {
// TODO: Need a specimen
// FWIW, I've already seen this getting exercised in other tests.
}
@Test
public void testOnMemoryChanged() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("""
file bash
start""");
long address = Long.decode(conn.executeCapture("print/x &main").split("\\s+")[2]);
conn.execute("set *((char*) &main) = 0x7f");
waitForPass(() -> {
ByteBuffer buf = ByteBuffer.allocate(1);
tb.trace.getMemoryManager().getBytes(lastSnap(conn), tb.addr(address), buf);
assertEquals(0x7f, buf.get(0));
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
public void testOnRegisterChanged() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("""
file bash
start""");
TraceObject thread = waitForValue(() -> tb.obj("Inferiors[1].Threads[1]"));
waitForPass(
() -> assertEquals("STOPPED", tb.objValue(thread, lastSnap(conn), "_state")),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("set $rax = 0x1234");
AddressSpace space = tb.trace.getBaseAddressFactory()
.getAddressSpace("Inferiors[1].Threads[1].Stack[0].Registers");
TraceMemorySpace regs = tb.trace.getMemoryManager().getMemorySpace(space, false);
waitForPass(() -> assertEquals("1234",
regs.getValue(lastSnap(conn), tb.reg("RAX")).getUnsignedValue().toString(16)));
}
}
@Test
public void testOnCont() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("""
file bash
run""");
TraceObject inf = waitForValue(() -> tb.obj("Inferiors[1]"));
TraceObject thread = waitForValue(() -> tb.obj("Inferiors[1].Threads[1]"));
waitForPass(() -> {
assertEquals("RUNNING", tb.objValue(inf, lastSnap(conn), "_state"));
assertEquals("RUNNING", tb.objValue(thread, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
public void testOnStop() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("""
file bash
start""");
TraceObject inf = waitForValue(() -> tb.obj("Inferiors[1]"));
TraceObject thread = waitForValue(() -> tb.obj("Inferiors[1].Threads[1]"));
waitForPass(() -> {
assertEquals("STOPPED", tb.objValue(inf, lastSnap(conn), "_state"));
assertEquals("STOPPED", tb.objValue(thread, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
public void testOnExited() throws Exception {
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("""
file bash
set args -c "exit 1"
run""");
waitForPass(() -> {
TraceSnapshot snapshot =
tb.trace.getTimeManager().getSnapshot(lastSnap(conn), false);
assertNotNull(snapshot);
assertEquals("Exited with code 1", snapshot.getDescription());
TraceObject inf1 = tb.obj("Inferiors[1]");
assertNotNull(inf1);
Object val = tb.objValue(inf1, lastSnap(conn), "_exit_code");
assertThat(val, instanceOf(Number.class));
assertEquals(1, ((Number) val).longValue());
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
/**
* Test on_clear_objfiles, on_new_objfile, on_free_objfile.
*
* <p>
* Technically, this probably doesn't hit on_free_objfile, but all three just call
* modules_changed, so I'm not concerned.
*/
@Test
public void testOnEventsObjfiles() throws Exception {
String print = DummyProc.which("expPrint");
String modPrint = "Inferiors[1].Modules[%s]".formatted(print);
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("""
file %s
start""".formatted(print));
waitForPass(() -> assertEquals(1, tb.objValues(lastSnap(conn), modPrint).size()),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("continue");
waitState(1, () -> lastSnap(conn), TargetExecutionState.TERMINATED);
/**
* Termination does not clear objfiles. Not until we run a new target.
*/
conn.execute("""
file bash
set args -c "exit 1"
run""");
waitForPass(() -> assertEquals(0, tb.objValues(lastSnap(conn), modPrint).size()),
RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
public void testOnBreakpointCreated() throws Exception {
String print = DummyProc.which("expPrint");
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("file " + print);
assertEquals(0, tb.objValues(lastSnap(conn), "Breakpoints[]").size());
conn.execute("break main");
waitForPass(() -> {
List<Object> brks = tb.objValues(lastSnap(conn), "Breakpoints[]");
assertEquals(1, brks.size());
return (TraceObject) brks.get(0);
});
}
}
@Test
public void testOnBreakpointModified() throws Exception {
String print = DummyProc.which("expPrint");
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("file " + print);
assertEquals(0, tb.objValues(lastSnap(conn), "Breakpoints[]").size());
conn.execute("break main");
TraceObject brk = waitForPass(() -> {
List<Object> brks = tb.objValues(lastSnap(conn), "Breakpoints[]");
assertEquals(1, brks.size());
return (TraceObject) brks.get(0);
});
assertEquals(null, tb.objValue(brk, lastSnap(conn), "Commands"));
conn.execute("""
commands %s
echo test
end""".formatted(brk.getCanonicalPath().index()));
waitForPass(
() -> assertEquals("echo test\n", tb.objValue(brk, lastSnap(conn), "Commands")));
}
}
@Test
public void testOnBreakpointDeleted() throws Exception {
String print = DummyProc.which("expPrint");
try (GdbAndTrace conn = startAndSyncGdb()) {
conn.execute("file " + print);
assertEquals(0, tb.objValues(lastSnap(conn), "Breakpoints[]").size());
conn.execute("break main");
TraceObject brk = waitForPass(() -> {
List<Object> brks = tb.objValues(lastSnap(conn), "Breakpoints[]");
assertEquals(1, brks.size());
return (TraceObject) brks.get(0);
});
conn.execute("delete %s".formatted(brk.getCanonicalPath().index()));
waitForPass(
() -> assertEquals(0, tb.objValues(lastSnap(conn), "Breakpoints[]").size()));
}
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,509 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.lldb.rmi;
import static org.hamcrest.Matchers.startsWith;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import ghidra.app.plugin.core.debug.gui.AbstractGhidraHeadedDebuggerGUITest;
import ghidra.app.plugin.core.debug.service.rmi.trace.RemoteAsyncResult;
import ghidra.app.plugin.core.debug.service.rmi.trace.RemoteMethod;
import ghidra.app.plugin.core.debug.service.rmi.trace.TraceRmiAcceptor;
import ghidra.app.plugin.core.debug.service.rmi.trace.TraceRmiHandler;
import ghidra.app.plugin.core.debug.service.rmi.trace.TraceRmiPlugin;
import ghidra.app.plugin.core.debug.utils.ManagedDomainObject;
import ghidra.app.services.TraceRmiService;
import ghidra.dbg.testutil.DummyProc;
import ghidra.framework.TestApplicationUtils;
import ghidra.framework.main.ApplicationLevelOnlyPlugin;
import ghidra.framework.model.DomainFile;
import ghidra.framework.plugintool.Plugin;
import ghidra.framework.plugintool.util.*;
import ghidra.program.model.address.Address;
import ghidra.program.model.address.AddressRangeImpl;
import ghidra.trace.model.Lifespan;
import ghidra.trace.model.breakpoint.TraceBreakpointKind;
import ghidra.trace.model.breakpoint.TraceBreakpointKind.TraceBreakpointKindSet;
import ghidra.trace.model.target.TraceObject;
import ghidra.trace.model.target.TraceObjectValue;
import ghidra.util.Msg;
import ghidra.util.NumericUtilities;
public abstract class AbstractLldbTraceRmiTest extends AbstractGhidraHeadedDebuggerGUITest {
// Connecting should be the first thing the script does, so use a tight timeout.
protected static final int CONNECT_TIMEOUT_MS = 3000;
protected static final int TIMEOUT_SECONDS = 300;
protected static final int QUIT_TIMEOUT_MS = 1000;
public static final String INSTRUMENT_STOPPED =
"""
ghidra_trace_txopen "Fake" 'ghidra_trace_create_obj Processes[1]'
define do-set-stopped
ghidra_trace_set_value Processes[1] _state '"STOPPED"'
end
define set-stopped
ghidra_trace_txopen Stopped do-set-stopped
end
#lldb.debugger.HandleCommand('target stop-hook add -P ghidralldb.hooks.StopHook')
#python lldb.events.stop.connect(lambda e: lldb.execute("set-stopped"))""";
public static final String INSTRUMENT_RUNNING =
"""
ghidra_trace_txopen "Fake" 'ghidra_trace_create_obj Processes[1]'
define do-set-running
ghidra_trace_set_value Processes[1] _state '"RUNNING"'
end
define set-running
ghidra_trace_txopen Running do-set-running
end
#lldb.debugger.HandleCommand('target stop-hook add -P ghidralldb.hooks.StopHook')
#python lldb.events.cont.connect(lambda e: lldb.execute("set-running"))""";
protected TraceRmiService traceRmi;
private Path lldbPath;
private Path outFile;
private Path errFile;
@BeforeClass
public static void setupPython() throws Throwable {
new ProcessBuilder("gradle", "Debugger-agent-lldb:installPyPackage")
.directory(TestApplicationUtils.getInstallationDirectory())
.inheritIO()
.start()
.waitFor();
}
@Before
public void setupTraceRmi() throws Throwable {
traceRmi = addPlugin(tool, TraceRmiPlugin.class);
lldbPath = Paths.get(DummyProc.which("lldb"));
outFile = Files.createTempFile("lldbout", null);
errFile = Files.createTempFile("lldberr", null);
}
protected void addAllDebuggerPlugins() throws PluginException {
PluginsConfiguration plugConf = new PluginsConfiguration() {
@Override
protected boolean accepts(Class<? extends Plugin> pluginClass) {
return !ApplicationLevelOnlyPlugin.class.isAssignableFrom(pluginClass);
}
};
for (PluginDescription pd : plugConf
.getPluginDescriptions(PluginPackage.getPluginPackage("Debugger"))) {
addPlugin(tool, pd.getPluginClass());
}
}
protected static String addrToStringForLldb(InetAddress address) {
if (address.isAnyLocalAddress()) {
return "127.0.0.1"; // Can't connect to 0.0.0.0 as such. Choose localhost.
}
return address.getHostAddress();
}
protected static String sockToStringForLldb(SocketAddress address) {
if (address instanceof InetSocketAddress tcp) {
return addrToStringForLldb(tcp.getAddress()) + ":" + tcp.getPort();
}
throw new AssertionError("Unhandled address type " + address);
}
protected record LldbResult(boolean timedOut, int exitCode, String stdout, String stderr) {
protected String handle() {
if (!"".equals(stderr) || (0 != exitCode && 143 != exitCode)) {
throw new LldbError(exitCode, stdout, stderr);
}
return stdout;
}
}
protected record ExecInLldb(Process lldb, CompletableFuture<LldbResult> future) {
}
@SuppressWarnings("resource") // Do not close stdin
protected ExecInLldb execInLldb(String script) throws IOException {
ProcessBuilder pb = new ProcessBuilder(lldbPath.toString());
// If commands come from file, LLDB will quit after EOF.
Msg.info(this, "outFile: " + outFile);
Msg.info(this, "errFile: " + errFile);
pb.redirectInput(ProcessBuilder.Redirect.PIPE);
pb.redirectOutput(outFile.toFile());
pb.redirectError(errFile.toFile());
Process lldbProc = pb.start();
OutputStream stdin = lldbProc.getOutputStream();
stdin.write(script.getBytes());
stdin.flush();
return new ExecInLldb(lldbProc, CompletableFuture.supplyAsync(() -> {
try {
if (!lldbProc.waitFor(TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
Msg.error(this, "Timed out waiting for LLDB");
lldbProc.destroyForcibly();
lldbProc.waitFor(TIMEOUT_SECONDS, TimeUnit.SECONDS);
return new LldbResult(true, -1, Files.readString(outFile),
Files.readString(errFile));
}
Msg.info(this, "LLDB exited with code " + lldbProc.exitValue());
return new LldbResult(false, lldbProc.exitValue(), Files.readString(outFile),
Files.readString(errFile));
}
catch (Exception e) {
return ExceptionUtils.rethrow(e);
}
finally {
lldbProc.destroyForcibly();
}
}));
}
public static class LldbError extends RuntimeException {
public final int exitCode;
public final String stdout;
public final String stderr;
public LldbError(int exitCode, String stdout, String stderr) {
super("""
exitCode=%d:
----stdout----
%s
----stderr----
%s
""".formatted(exitCode, stdout, stderr));
this.exitCode = exitCode;
this.stdout = stdout;
this.stderr = stderr;
}
}
protected String runThrowError(String script) throws Exception {
CompletableFuture<LldbResult> result = execInLldb(script).future;
return result.get(TIMEOUT_SECONDS, TimeUnit.SECONDS).handle();
}
protected record LldbAndHandler(ExecInLldb exec, TraceRmiHandler handler)
implements AutoCloseable {
protected RemoteMethod getMethod(String name) {
return Objects.requireNonNull(handler.getMethods().get(name));
}
public void execute(String cmd) {
RemoteMethod execute = getMethod("execute");
execute.invoke(Map.of("cmd", cmd));
}
public RemoteAsyncResult executeAsync(String cmd) {
RemoteMethod execute = getMethod("execute");
return execute.invokeAsync(Map.of("cmd", cmd));
}
public String executeCapture(String cmd) {
RemoteMethod execute = getMethod("execute");
return (String) execute.invoke(Map.of("cmd", cmd, "to_string", true));
}
@Override
public void close() throws Exception {
Msg.info(this, "Cleaning up lldb");
exec.lldb().destroy();
try {
LldbResult r = exec.future.get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
r.handle();
waitForPass(() -> assertTrue(handler.isClosed()));
}
finally {
exec.lldb.destroyForcibly();
}
}
}
protected LldbAndHandler startAndConnectLldb(Function<String, String> scriptSupplier)
throws Exception {
TraceRmiAcceptor acceptor = traceRmi.acceptOne(null);
ExecInLldb exec =
execInLldb(scriptSupplier.apply(sockToStringForLldb(acceptor.getAddress())));
acceptor.setTimeout(CONNECT_TIMEOUT_MS);
try {
TraceRmiHandler handler = acceptor.accept();
return new LldbAndHandler(exec, handler);
}
catch (SocketTimeoutException e) {
exec.lldb.destroyForcibly();
exec.future.get(TIMEOUT_SECONDS, TimeUnit.SECONDS).handle();
throw e;
}
}
protected LldbAndHandler startAndConnectLldb() throws Exception {
return startAndConnectLldb(addr -> """
script import ghidralldb
ghidra_trace_connect %s
""".formatted(addr));
}
@SuppressWarnings("resource")
protected String runThrowError(Function<String, String> scriptSupplier)
throws Exception {
LldbAndHandler conn = startAndConnectLldb(scriptSupplier);
LldbResult r = conn.exec.future.get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
String stdout = r.handle();
waitForPass(() -> assertTrue(conn.handler.isClosed()));
return stdout;
}
protected void waitStopped() {
TraceObject proc = Objects.requireNonNull(tb.objAny("Processes[]", Lifespan.at(0)));
waitForPass(() -> assertEquals("STOPPED", tb.objValue(proc, 0, "_state")));
waitTxDone();
}
protected void waitRunning() {
TraceObject proc = Objects.requireNonNull(tb.objAny("Processes[]", Lifespan.at(0)));
waitForPass(() -> assertEquals("RUNNING", tb.objValue(proc, 0, "_state")));
waitTxDone();
}
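/**
 * Extract the section of captured output that follows {@code head} and precedes the next
 * "---" delimiter, dropping "(lldb)" prompt echoes and blank lines along the way.
 */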
protected String extractOutSection(String out, String head) {
String[] split = out.split("\n");
String xout = "";
for (String s : split) {
if (!s.startsWith("(lldb)") && !s.equals("")) {
xout += s + "\n";
}
}
return xout.split(head)[1].split("---")[0].replace("(lldb)", "").trim();
}
record MemDump(long address, byte[] data) {
}
protected MemDump parseHexDump(String dump) throws IOException {
// First, get the address. Assume contiguous, so only need top line.
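// Hypothetical line shape assumed here (illustrative, not captured from a real session):
//   0x00007fff5fbff000: 0x48 0x65 0x6c 0x6c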
List<String> lines = List.of(dump.split("\n"));
List<String> toksLine0 = List.of(lines.get(0).split("\\s+"));
assertThat(toksLine0.get(0), startsWith("0x"));
String addrstr = toksLine0.get(0);
if (addrstr.contains(":")) {
addrstr = addrstr.substring(0, addrstr.indexOf(":"));
}
long address = Long.decode(addrstr);
ByteArrayOutputStream buf = new ByteArrayOutputStream();
for (String l : lines) {
List<String> parts = List.of(l.split(":"));
assertEquals(2, parts.size());
String hex = parts.get(1).replaceAll("\\s*0x", "");
byte[] lineData = NumericUtilities.convertStringToBytes(hex);
assertNotNull("Converted to null: " + hex, parts.get(1));
buf.write(lineData);
}
return new MemDump(address, buf.toByteArray());
}
record RegDump() {
}
protected RegDump parseRegDump(String dump) {
return new RegDump();
}
protected ManagedDomainObject openDomainObject(String path) throws Exception {
DomainFile df = env.getProject().getProjectData().getFile(path);
assertNotNull(df);
return new ManagedDomainObject(df, false, false, monitor);
}
protected ManagedDomainObject waitDomainObject(String path) throws Exception {
DomainFile df;
long start = System.currentTimeMillis();
while (true) {
df = env.getProject().getProjectData().getFile(path);
if (df != null) {
return new ManagedDomainObject(df, false, false, monitor);
}
Thread.sleep(1000);
if (System.currentTimeMillis() - start > 30000) {
throw new TimeoutException("30 seconds expired waiting for domain file");
}
}
}
protected void assertBreakLoc(TraceObjectValue locVal, String key, Address addr, int len,
Set<TraceBreakpointKind> kinds, String expression) throws Exception {
assertEquals(key, locVal.getEntryKey());
TraceObject loc = locVal.getChild();
TraceObject spec = loc.getCanonicalParent(0).getParent();
assertEquals(new AddressRangeImpl(addr, len), loc.getValue(0, "_range").getValue());
assertEquals(TraceBreakpointKindSet.encode(kinds), spec.getValue(0, "_kinds").getValue());
assertTrue(spec.getValue(0, "_expression").getValue().toString().contains(expression));
}
protected void assertWatchLoc(TraceObjectValue locVal, String key, Address addr, int len,
Set<TraceBreakpointKind> kinds, String expression) throws Exception {
assertEquals(key, locVal.getEntryKey());
TraceObject loc = locVal.getChild();
assertEquals(new AddressRangeImpl(addr, len), loc.getValue(0, "_range").getValue());
assertEquals(TraceBreakpointKindSet.encode(kinds), loc.getValue(0, "_kinds").getValue());
}
protected void waitTxDone() {
waitFor(() -> tb.trace.getCurrentTransactionInfo() == null);
}
private record Cut(String head, int begin, int end) {
String parseCell(String line) {
int begin = Math.min(line.length(), this.begin);
int end = Math.min(line.length(), this.end);
/**
* NOTE: Do not assert previous char is space.
*
* When breakpoints table spells out locations, Address and What cells are indented and
* no longer align with their column headers.
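*
* For example (illustrative only, not real lldb output), a spelled-out location row might
* look like:
*   1.1                          y   0x0000000000401000 in main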
*/
return line.substring(begin, end).trim();
}
}
protected record Row(Map<String, String> cells) {
private static Row parse(List<Cut> cuts, String line) {
return new Row(
cuts.stream().collect(Collectors.toMap(Cut::head, c -> c.parseCell(line))));
}
public String getCell(String head) {
return cells.get(head);
}
}
protected record Tabular(List<String> headings, List<Row> rows) {
static final Pattern SPACES = Pattern.compile(" *");
static final Pattern WORDS = Pattern.compile("\\w+");
private static List<Cut> findCuts(String header) {
List<Cut> result = new ArrayList<>();
Matcher spaceMatcher = SPACES.matcher(header);
Matcher wordMatcher = WORDS.matcher(header);
int start = 0;
while (start < header.length()) {
if (!spaceMatcher.find(start)) {
throw new AssertionError();
}
start = spaceMatcher.end();
if (start >= header.length()) {
break;
}
if (!wordMatcher.find(start)) {
throw new AssertionError();
}
result.add(new Cut(wordMatcher.group(), wordMatcher.start(), wordMatcher.end()));
start = wordMatcher.end();
}
return result;
}
private static List<Cut> adjustCuts(List<Cut> cuts) {
List<Cut> result = new ArrayList<>();
for (int i = 0; i < cuts.size(); i++) {
Cut cut = cuts.get(i);
int j = i + 1;
int end = j < cuts.size() ? cuts.get(j).begin : Integer.MAX_VALUE;
result.add(new Cut(cut.head, cut.begin, end));
}
return result;
}
/**
* Parse a table.
*
* <p>
* This is far from perfect, but good enough for making assertions in tests. For example, in
* the breakpoints table, lldb may insert an extra informational line under a breakpoint
* row. This line will get mangled and parsed as if it were an entry; however, its "Num"
* cell will be empty, so such rows are unlikely to interfere.
*
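* <p>
* A minimal usage sketch (the headings and spacing below are illustrative, not actual lldb
* output):
*
* <pre>{@code
* Tabular table = Tabular.parse("""
*     Num  Type        Address             What
*     1    breakpoint  0x0000000000401000  in main
*     """);
* Row row = table.findRow("Num", "1");
* assertEquals("breakpoint", row.getCell("Type"));
* }</pre>
*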
* @param out the output in tabular form
* @return the table object, more or less
*/
public static Tabular parse(String out) {
List<String> lines = List.of(out.split("\n"));
if (lines.isEmpty()) {
throw new AssertionError("Output is not tabular");
}
List<Cut> cuts = adjustCuts(findCuts(lines.get(0)));
return new Tabular(cuts.stream().map(Cut::head).toList(),
lines.stream().skip(1).map(l -> Row.parse(cuts, l)).toList());
}
public Row findRow(String head, String contents) {
return rows.stream()
.filter(r -> Objects.equals(contents, r.getCell(head)))
.findFirst()
.orElse(null);
}
}
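/**
 * Repeatedly run the given assertions until they pass or the timeout expires.
 *
 * <p>
 * Each failed attempt is retried after {@code retryDelayMs}; if the timeout expires, the most
 * recent {@link AssertionError} is re-thrown.
 *
 * @param runnable the assertions to retry
 * @param timeoutMs the maximum time to keep retrying, in milliseconds
 * @param retryDelayMs the delay between attempts, in milliseconds
 */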
public static void waitForPass(Runnable runnable, long timeoutMs, long retryDelayMs) {
long start = System.currentTimeMillis();
AssertionError lastError = null;
while (System.currentTimeMillis() - start < timeoutMs) {
try {
runnable.run();
return;
}
catch (AssertionError e) {
lastError = e;
}
try {
Thread.sleep(retryDelayMs);
}
catch (InterruptedException e) {
// Retry sooner, I guess.
}
}
if (lastError == null) {
throw new AssertionError("Timed out before first try?");
}
throw lastError;
}
}

View File

@ -0,0 +1,407 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.lldb.rmi;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Objects;
import org.junit.Ignore;
import org.junit.Test;
import agent.gdb.model.GdbLinuxSpecimen;
import ghidra.app.plugin.core.debug.utils.ManagedDomainObject;
import ghidra.dbg.util.PathPattern;
import ghidra.dbg.util.PathPredicates;
import ghidra.program.model.address.AddressSpace;
import ghidra.trace.database.ToyDBTraceBuilder;
import ghidra.trace.model.Lifespan;
import ghidra.trace.model.Trace;
import ghidra.trace.model.memory.TraceMemorySpace;
import ghidra.trace.model.target.TraceObject;
import ghidra.trace.model.time.TraceSnapshot;
public class LldbHooksTest extends AbstractLldbTraceRmiTest {
private static final long RUN_TIMEOUT_MS = 20000;
private static final long RETRY_MS = 500;
record LldbAndTrace(LldbAndHandler conn, ManagedDomainObject mdo) implements AutoCloseable {
public void execute(String cmd) {
conn.execute(cmd);
}
public String executeCapture(String cmd) {
return conn.executeCapture(cmd);
}
@Override
public void close() throws Exception {
conn.close();
mdo.close();
}
}
@SuppressWarnings("resource")
protected LldbAndTrace startAndSyncLldb() throws Exception {
LldbAndHandler conn = startAndConnectLldb();
try {
// TODO: Why does using 'set arch' cause a hang at quit?
conn.execute("ghidralldb.util.set_convenience_variable('ghidra-language', 'x86:LE:64:default')");
conn.execute("ghidra_trace_start");
ManagedDomainObject mdo = waitDomainObject("/New Traces/lldb/noname");
tb = new ToyDBTraceBuilder((Trace) mdo.get());
return new LldbAndTrace(conn, mdo);
}
catch (Exception e) {
conn.close();
throw e;
}
}
protected long lastSnap(LldbAndTrace conn) {
return conn.conn.handler().getLastSnapshot(tb.trace);
}
// TODO: This passes if you single-step through it but fails on some transactional stuff when run straight through
//@Test
public void testOnNewThread() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
start(conn, "%s".formatted(GdbLinuxSpecimen.CLONE_EXIT.getCommandLine()));
conn.execute("break set -n work");
waitForPass(() -> {
TraceObject proc = tb.objAny("Processes[]");
assertNotNull(proc);
assertEquals("STOPPED", tb.objValue(proc, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
txPut(conn, "threads");
waitForPass(() -> assertEquals(1,
tb.objValues(lastSnap(conn), "Processes[].Threads[]").size()),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("continue");
waitStopped();
txPut(conn, "threads");
waitForPass(() -> assertEquals(2,
tb.objValues(lastSnap(conn), "Processes[].Threads[]").size()),
RUN_TIMEOUT_MS, RETRY_MS);
}
}
// TODO: This passes if you single-step through it but fails on some transactional stuff when run straight through
//@Test
public void testOnThreadSelected() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
traceManager.openTrace(tb.trace);
start(conn, "%s".formatted(GdbLinuxSpecimen.CLONE_EXIT.getCommandLine()));
conn.execute("break set -n work");
waitForPass(() -> {
TraceObject inf = tb.objAny("Processes[]");
assertNotNull(inf);
assertEquals("STOPPED", tb.objValue(inf, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
txPut(conn, "threads");
waitForPass(() -> assertEquals(1,
tb.objValues(lastSnap(conn), "Processes[].Threads[]").size()),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("continue");
waitStopped();
waitForPass(() -> {
TraceObject inf = tb.objAny("Processes[]");
assertNotNull(inf);
assertEquals("STOPPED", tb.objValue(inf, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
waitForPass(() -> assertEquals(2,
tb.objValues(lastSnap(conn), "Processes[].Threads[]").size()),
RUN_TIMEOUT_MS, RETRY_MS);
// Now the real test
conn.execute("thread select 1");
conn.execute("frame select 0");
waitForPass(() -> {
String ti0 = conn.executeCapture("thread info");
assertTrue(ti0.contains("#1"));
String threadIndex = threadIndex(traceManager.getCurrentObject());
assertTrue(ti0.contains(threadIndex));
}, RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("thread select 2");
conn.execute("frame select 0");
waitForPass(() -> {
String ti0 = conn.executeCapture("thread info");
assertTrue(ti0.contains("#2"));
String threadIndex = threadIndex(traceManager.getCurrentObject());
assertTrue(ti0.contains(threadIndex));
}, RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("thread select 1");
conn.execute("frame select 0");
waitForPass(() -> {
String ti0 = conn.executeCapture("thread info");
assertTrue(ti0.contains("#1"));
String threadIndex = threadIndex(traceManager.getCurrentObject());
assertTrue(ti0.contains(threadIndex));
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
protected String getIndex(TraceObject object, String pattern, int n) {
if (object == null) {
return null;
}
PathPattern pat = PathPredicates.parse(pattern).getSingletonPattern();
// if (pat.countWildcards() != 1) {
// throw new IllegalArgumentException("Exactly one wildcard required");
// }
List<String> path = object.getCanonicalPath().getKeyList();
if (path.size() < pat.asPath().size()) {
return null;
}
List<String> matched = pat.matchKeys(path.subList(0, pat.asPath().size()));
if (matched == null) {
return null;
}
if (matched.size() <= n) {
return null;
}
return matched.get(n);
}
protected String threadIndex(TraceObject object) {
return getIndex(object, "Processes[].Threads[]", 1);
}
protected String frameIndex(TraceObject object) {
return getIndex(object, "Processes[].Threads[].Stack[]", 2);
}
@Test
public void testOnFrameSelected() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
traceManager.openTrace(tb.trace);
start(conn, "bash");
conn.execute("breakpoint set -n read");
conn.execute("cont");
waitStopped();
waitForPass(() -> assertThat(
tb.objValues(lastSnap(conn), "Processes[].Threads[].Stack[]").size(),
greaterThan(2)),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("frame select 1");
waitForPass(() -> assertEquals("1", frameIndex(traceManager.getCurrentObject())),
RUN_TIMEOUT_MS, RETRY_MS);
conn.execute("frame select 0");
waitForPass(() -> assertEquals("0", frameIndex(traceManager.getCurrentObject())),
RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
@Ignore
public void testOnSyscallMemory() throws Exception {
// TODO: Need a specimen
// FWIW, I've already seen this getting exercised in other tests.
}
@Test
public void testOnMemoryChanged() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
start(conn, "bash");
long address = Long.decode(conn.executeCapture("dis -c1 -n main").split("\\s+")[1]);
conn.execute("expr *((char*)(void(*)())main) = 0x7f");
conn.execute("ghidra_trace_txstart 'Tx'");
conn.execute("ghidra_trace_putmem `(void(*)())main` 10");
conn.execute("ghidra_trace_txcommit");
waitForPass(() -> {
ByteBuffer buf = ByteBuffer.allocate(10);
tb.trace.getMemoryManager().getBytes(lastSnap(conn), tb.addr(address), buf);
assertEquals(0x7f, buf.get(0));
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
public void testOnRegisterChanged() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
start(conn, "bash");
conn.execute("expr $rax = 0x1234");
conn.execute("ghidra_trace_txstart 'Tx'");
conn.execute("ghidra_trace_putreg");
conn.execute("ghidra_trace_txcommit");
String path = "Processes[].Threads[].Stack[].Registers";
TraceObject registers = Objects.requireNonNull(tb.objAny(path, Lifespan.at(0)));
AddressSpace space = tb.trace.getBaseAddressFactory()
.getAddressSpace(registers.getCanonicalPath().toString());
TraceMemorySpace regs = tb.trace.getMemoryManager().getMemorySpace(space, false);
waitForPass(() -> assertEquals("1234",
regs.getValue(lastSnap(conn), tb.reg("RAX")).getUnsignedValue().toString(16)));
}
}
@Test
public void testOnCont() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
start(conn, "bash");
conn.execute("cont");
waitRunning();
TraceObject proc = waitForValue(() -> tb.objAny("Processes[]"));
waitForPass(() -> {
assertEquals("RUNNING", tb.objValue(proc, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
public void testOnStop() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
start(conn, "bash");
TraceObject inf = waitForValue(() -> tb.objAny("Processes[]"));
waitForPass(() -> {
assertEquals("STOPPED", tb.objValue(inf, lastSnap(conn), "_state"));
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
public void testOnExited() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
conn.execute("file bash");
conn.execute("ghidra_trace_sync_enable");
conn.execute("process launch --stop-at-entry -- -c 'exit 1'");
txPut(conn, "processes");
conn.execute("cont");
waitRunning();
waitStopped();
waitForPass(() -> {
TraceSnapshot snapshot =
tb.trace.getTimeManager().getSnapshot(lastSnap(conn), false);
assertNotNull(snapshot);
assertEquals("Exited with code 1", snapshot.getDescription());
TraceObject proc = tb.objAny("Processes[]");
assertNotNull(proc);
Object val = tb.objValue(proc, lastSnap(conn), "_exit_code");
assertThat(val, instanceOf(Number.class));
assertEquals(1, ((Number) val).longValue());
}, RUN_TIMEOUT_MS, RETRY_MS);
}
}
@Test
public void testOnBreakpointCreated() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
start(conn, "bash");
assertEquals(0, tb.objValues(lastSnap(conn), "Processes[].Breakpoints[]").size());
conn.execute("breakpoint set -n main");
conn.execute("stepi");
waitForPass(() -> {
List<Object> brks = tb.objValues(lastSnap(conn), "Processes[].Breakpoints[]");
assertEquals(1, brks.size());
return (TraceObject) brks.get(0);
});
}
}
@Test
public void testOnBreakpointModified() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
start(conn, "bash");
assertEquals(0, tb.objValues(lastSnap(conn), "Breakpoints[]").size());
conn.execute("breakpoint set -n main");
conn.execute("stepi");
TraceObject brk = waitForPass(() -> {
List<Object> brks = tb.objValues(lastSnap(conn), "Breakpoints[]");
assertEquals(1, brks.size());
return (TraceObject) brks.get(0);
});
assertEquals(null, tb.objValue(brk, lastSnap(conn), "Condition"));
conn.execute("breakpoint modify -c 'x>3'");
conn.execute("stepi");
// NB: Testing "Commands" requires multi-line input - not clear how to do this
//assertEquals(null, tb.objValue(brk, lastSnap(conn), "Commands"));
//conn.execute("breakpoint command add 'echo test'");
//conn.execute("DONE");
waitForPass(
() -> assertEquals("x>3", tb.objValue(brk, lastSnap(conn), "Condition")));
}
}
@Test
public void testOnBreakpointDeleted() throws Exception {
try (LldbAndTrace conn = startAndSyncLldb()) {
start(conn, "bash");
assertEquals(0, tb.objValues(lastSnap(conn), "Processes[].Breakpoints[]").size());
conn.execute("breakpoint set -n main");
conn.execute("stepi");
TraceObject brk = waitForPass(() -> {
List<Object> brks = tb.objValues(lastSnap(conn), "Processes[].Breakpoints[]");
assertEquals(1, brks.size());
return (TraceObject) brks.get(0);
});
conn.execute("breakpoint delete %s".formatted(brk.getCanonicalPath().index()));
conn.execute("stepi");
waitForPass(
() -> assertEquals(0, tb.objValues(lastSnap(conn), "Processes[].Breakpoints[]").size()));
}
}
private void start(LldbAndTrace conn, String obj) {
conn.execute("file "+obj);
conn.execute("ghidra_trace_sync_enable");
conn.execute("process launch --stop-at-entry");
txPut(conn, "processes");
}
private void txPut(LldbAndTrace conn, String obj) {
conn.execute("ghidra_trace_txstart 'Tx"+obj+"'");
conn.execute("ghidra_trace_put_"+obj);
conn.execute("ghidra_trace_txcommit");
}
}

View File

@ -0,0 +1,94 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*plugins {
id 'com.google.protobuf' version '0.8.10'
}*/
configurations {
allProtocArtifacts
protocArtifact
}
def platform = getCurrentPlatformName()
dependencies {
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:windows-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:linux-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:linux-aarch_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:osx-x86_64@exe'
allProtocArtifacts 'com.google.protobuf:protoc:3.21.8:osx-aarch_64@exe'
if (isCurrentWindows()) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:windows-x86_64@exe'
}
if (isCurrentLinux()) {
if (platform.endsWith("x86_64")) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:linux-x86_64@exe'
}
else {
protocArtifact 'com.google.protobuf:protoc:3.21.8:linux-aarch_64@exe'
}
}
if (isCurrentMac()) {
if (platform.endsWith("x86_64")) {
protocArtifact 'com.google.protobuf:protoc:3.21.8:osx-x86_64@exe'
}
else {
protocArtifact 'com.google.protobuf:protoc:3.21.8:osx-aarch_64@exe'
}
}
}
/*protobuf {
protoc {
artifact = 'com.google.protobuf:protoc:3.21.8'
}
}*/
task generateProto {
ext.srcdir = file("src/main/proto")
ext.src = fileTree(srcdir) {
include "**/*.proto"
}
ext.outdir = file("build/generated/source/proto/main/java")
outputs.dir(outdir)
inputs.files(src)
dependsOn(configurations.protocArtifact)
doLast {
def exe = configurations.protocArtifact.first()
if (!isCurrentWindows()) {
exe.setExecutable(true)
}
exec {
commandLine exe, "--java_out=$outdir", "-I$srcdir"
args src
}
}
}
tasks.compileJava.dependsOn(tasks.generateProto)
tasks.eclipse.dependsOn(tasks.generateProto)
rootProject.tasks.prepDev.dependsOn(tasks.generateProto)
sourceSets {
main {
java {
srcDir tasks.generateProto.outdir
}
}
}
zipSourceSubproject.dependsOn generateProto

View File

@ -0,0 +1,86 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
configurations {
pypkgInstall
}
task assemblePyPackage(type: Copy) {
from "src/main/py"
into "build/pypkg/"
}
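// Ask gdb (or lldb) which Python it embeds (e.g. "python3.10") so the build and install
// tasks below can invoke a matching interpreter from the PATH.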
def getGdbPython() {
def out = new ByteArrayOutputStream()
exec {
commandLine "gdb", "--batch"
args "-ex", "python import sys"
args "-ex", "python print(f'python{sys.version_info.major}.{sys.version_info.minor}')"
standardOutput = out
}
return "$out".strip()
}
def getLldbPython() {
def out = new ByteArrayOutputStream()
exec {
commandLine "lldb", "--batch"
args "-ex", "python import sys"
args "-ex", "python print(f'python{sys.version_info.major}.{sys.version_info.minor}')"
standardOutput = out
}
return "$out".strip()
}
task configureBuildPyPackage {
doLast {
def gdbPython = getGdbPython()
buildPyPackage.commandLine gdbPython, "-m", "build"
}
}
task buildPyPackage(type: Exec) {
dependsOn(configureBuildPyPackage)
ext.dist = { file("build/pypkg/dist") }
inputs.files(assemblePyPackage)
outputs.dir(dist)
workingDir { "build/pypkg" }
}
task configureInstallPyPackage {
dependsOn(configurations.pypkgInstall)
doLast {
def gdbPython = getGdbPython()
installPyPackage.commandLine gdbPython, "-m", "pip", "install", "--force-reinstall"
installPyPackage.args configurations.pypkgInstall.filter { f -> !f.name.endsWith(".jar") }
installPyPackage.args file("build/pypkg")
}
}
task installPyPackage(type: Exec) {
dependsOn(configureInstallPyPackage)
inputs.files(assemblePyPackage)
}
task phonyJarPyPackage(type: Jar) {
dependsOn(assemblePyPackage)
}
afterEvaluate {
artifacts {
pypkgInstall file("build/pypkg")
pypkgInstall phonyJarPyPackage
}
}