use crate::{
call_frames::CallFrames,
disassembler, ebpf,
elf::EBpfElf,
error::{EbpfError, UserDefinedError},
jit::{JitProgram, JitProgramArgument},
memory_region::{AccessType, MemoryMapping, MemoryRegion},
user_error::UserError,
};
use log::debug;
use std::{collections::HashMap, fmt::Debug, u32};
/// Signature of a bytecode verifier: inspects the raw program bytes and
/// rejects invalid programs with a user-defined error.
pub type Verifier<E> = fn(prog: &[u8]) -> Result<(), E>;
/// Outcome of executing a program: the final value of r0 on success.
pub type ProgramResult<E> = Result<u64, EbpfError<E>>;
/// `?`-like unwrapping for functions that report errors through an
/// out-parameter instead of a return value: on `Err`, converts and stores the
/// error into `*$result` and returns early; on `Ok`, evaluates to the value.
#[macro_export]
macro_rules! question_mark {
( $value:expr, $result:ident ) => {{
let value = $value;
match value {
Err(err) => {
// Hand the error to the caller through the result slot and bail out.
*$result = Err(err.into());
return;
}
Ok(value) => value,
}
}};
}
/// Raw function-pointer form of a syscall handler: a context object, the five
/// eBPF argument registers (r1..r5), the memory mapping, and an out-parameter
/// receiving the result (written via e.g. `question_mark!`).
pub type SyscallFunction<E, O> =
fn(O, u64, u64, u64, u64, u64, &MemoryMapping, &mut ProgramResult<E>);
/// Trait implemented by stateful syscall handlers.
pub trait SyscallObject<E: UserDefinedError> {
/// Invokes the syscall with the five eBPF argument registers (r1..r5).
/// The outcome is written into `result` rather than returned, so the
/// signature stays compatible with the raw `SyscallFunction` pointer form.
#[allow(clippy::too_many_arguments)]
fn call(
&mut self,
arg1: u64,
arg2: u64,
arg3: u64,
arg4: u64,
arg5: u64,
memory_mapping: &MemoryMapping,
result: &mut ProgramResult<E>,
);
}
/// One registered syscall as stored in the `SyscallRegistry`.
#[derive(Debug, PartialEq)]
pub struct Syscall {
/// Address of the handler, a `SyscallFunction` pointer stored as `u64`.
pub function: u64,
/// Index of this syscall's context object in the VM's object table.
pub context_object_slot: usize,
}
/// Hand-rolled mirror of a Rust trait-object vtable.
/// NOTE(review): this relies on the compiler's unspecified vtable layout
/// (drop, size, align, then method pointers) — verify on toolchain upgrades.
pub struct DynTraitVtable {
/// Drop-in-place entry for the concrete type.
pub drop: fn(*const u8),
/// Size of the concrete type in bytes.
pub size: usize,
/// Alignment of the concrete type in bytes.
pub align: usize,
/// Method pointers; `methods[0]` is used elsewhere in this file to identify
/// a `SyscallObject` impl by its `call` entry.
pub methods: [*const u8; 32],
}
/// Hand-rolled mirror of a trait-object fat pointer: data pointer + vtable.
/// NOTE(review): depends on the compiler's fat-pointer layout — see
/// `DynTraitVtable`.
#[derive(Clone, Copy)]
pub struct DynTraitFatPointer {
/// Pointer to the concrete object.
pub data: *mut u8,
/// Its vtable.
pub vtable: &'static DynTraitVtable,
}
/// Holds the syscalls registered for a program, keyed by symbol hash.
#[derive(Debug, PartialEq, Default)]
pub struct SyscallRegistry {
// Symbol hash -> syscall entry (function pointer + context object slot).
entries: HashMap<u32, Syscall>,
// Function pointer -> context object slot, for reverse lookups.
context_object_slots: HashMap<u64, usize>,
}
impl SyscallRegistry {
    /// Registers a syscall under `hash`.
    ///
    /// # Errors
    /// `SycallAlreadyRegistered` (variant name as declared in the error enum)
    /// if either the hash or the function pointer was registered before.
    /// Both maps are checked up front, so a failed registration leaves the
    /// registry completely unmodified (the previous insert-then-test approach
    /// could leave a half-registered entry behind).
    pub fn register_syscall_by_hash<E: UserDefinedError, O: SyscallObject<E>>(
        &mut self,
        hash: u32,
        function: SyscallFunction<E, &mut O>,
    ) -> Result<(), EbpfError<E>> {
        let function = function as *const u8 as u64;
        if self.entries.contains_key(&hash) || self.context_object_slots.contains_key(&function) {
            return Err(EbpfError::SycallAlreadyRegistered(hash as usize));
        }
        // Slots are handed out densely in registration order.
        let context_object_slot = self.entries.len();
        self.entries.insert(
            hash,
            Syscall {
                function,
                context_object_slot,
            },
        );
        self.context_object_slots
            .insert(function, context_object_slot);
        Ok(())
    }
    /// Registers a syscall under the hash of its symbol `name`.
    pub fn register_syscall_by_name<E: UserDefinedError, O: SyscallObject<E>>(
        &mut self,
        name: &[u8],
        function: SyscallFunction<E, &mut O>,
    ) -> Result<(), EbpfError<E>> {
        self.register_syscall_by_hash(ebpf::hash_symbol_name(name), function)
    }
    /// Looks up a registered syscall by symbol hash.
    pub fn lookup_syscall(&self, hash: u32) -> Option<&Syscall> {
        self.entries.get(&hash)
    }
    /// Looks up the context object slot bound to a syscall function pointer.
    pub fn lookup_context_object_slot(&self, function_pointer: u64) -> Option<usize> {
        self.context_object_slots.get(&function_pointer).copied()
    }
    /// Number of registered syscalls.
    pub fn get_number_of_syscalls(&self) -> usize {
        self.entries.len()
    }
}
/// VM configuration.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Config {
/// Maximum depth of nested BPF-to-BPF call frames.
pub max_call_depth: usize,
/// Size of a single call-frame stack region in bytes.
pub stack_frame_size: usize,
/// Enables instruction metering (budget accounting during execution).
pub enable_instruction_meter: bool,
/// Enables per-instruction tracing into the `Tracer` log.
pub enable_instruction_tracing: bool,
}
impl Default for Config {
/// Defaults: 20 nested frames of 4 KiB each, metering on, tracing off.
fn default() -> Self {
Self {
max_call_depth: 20,
stack_frame_size: 4_096,
enable_instruction_meter: true,
enable_instruction_tracing: false,
}
}
}
/// A loaded program (ELF or raw text bytes) that the VM can execute.
pub trait Executable<E: UserDefinedError, I: InstructionMeter>: Send + Sync {
/// The configuration this executable was loaded with.
fn get_config(&self) -> &Config;
/// The text section: its VM start address and its bytes.
fn get_text_bytes(&self) -> Result<(u64, &[u8]), EbpfError<E>>;
/// Read-only sections as (VM address, bytes) pairs.
fn get_ro_sections(&self) -> Result<Vec<(u64, &[u8])>, EbpfError<E>>;
/// Instruction offset of the entrypoint within the text section.
fn get_entrypoint_instruction_offset(&self) -> Result<usize, EbpfError<E>>;
/// Registers a BPF-to-BPF call target under a symbol hash.
fn register_bpf_function(&mut self, hash: u32, pc: usize);
/// Looks up a BPF-to-BPF call target by symbol hash.
fn lookup_bpf_function(&self, hash: u32) -> Option<&usize>;
/// The syscall registry bound to this executable.
fn get_syscall_registry(&self) -> &SyscallRegistry;
/// Replaces the syscall registry.
fn set_syscall_registry(&mut self, syscall_registry: SyscallRegistry);
/// The JIT-compiled program, if `jit_compile` succeeded.
fn get_compiled_program(&self) -> Option<&JitProgram<E, I>>;
/// JIT-compiles the program.
fn jit_compile(&mut self) -> Result<(), EbpfError<E>>;
/// Reports an unresolved symbol at `insn_offset` as an error.
fn report_unresolved_symbol(&self, insn_offset: usize) -> Result<u64, EbpfError<E>>;
/// Symbol tables: (BPF function symbols, syscall symbols).
fn get_symbols(&self) -> (HashMap<u32, String>, HashMap<usize, (String, usize)>);
}
impl<E: UserDefinedError, I: 'static + InstructionMeter> dyn Executable<E, I> {
    /// Loads an executable from an ELF image, optionally running `verifier`
    /// over its text section first.
    pub fn from_elf(
        elf_bytes: &[u8],
        verifier: Option<Verifier<E>>,
        config: Config,
    ) -> Result<Box<Self>, EbpfError<E>> {
        let loaded = EBpfElf::load(config, elf_bytes)?;
        // Fetch the text section unconditionally so a malformed text section
        // is reported even when no verifier was supplied.
        let (_vm_addr, text_bytes) = loaded.get_text_bytes()?;
        if let Some(verify) = verifier {
            verify(text_bytes)?;
        }
        Ok(Box::new(loaded))
    }
    /// Wraps raw text-section bytes as an executable, optionally running
    /// `verifier` over them first.
    pub fn from_text_bytes(
        text_bytes: &[u8],
        verifier: Option<Verifier<E>>,
        config: Config,
    ) -> Result<Box<Self>, EbpfError<E>> {
        if let Some(verify) = verifier {
            verify(text_bytes)?;
        }
        Ok(Box::new(EBpfElf::new_from_text_bytes(config, text_bytes)))
    }
}
/// Instruction budget accounting for a running program.
pub trait InstructionMeter {
/// Charges `amount` executed instructions against the budget.
fn consume(&mut self, amount: u64);
/// Remaining instruction budget.
fn get_remaining(&self) -> u64;
}
/// Instruction meter that imposes no effective limit and charges nothing.
#[derive(Debug, PartialEq)]
pub struct DefaultInstructionMeter {}
impl InstructionMeter for DefaultInstructionMeter {
fn consume(&mut self, _amount: u64) {}
fn get_remaining(&self) -> u64 {
// Effectively unlimited budget.
std::i64::MAX as u64
}
}
/// Records the machine state before each executed instruction.
#[derive(Default, Clone)]
pub struct Tracer {
/// One entry per instruction: registers r0..r10 followed by the pc.
pub log: Vec<[u64; 12]>,
}
impl Tracer {
    /// Appends one register/pc snapshot to the trace log.
    pub fn trace(&mut self, state: [u64; 12]) {
        self.log.push(state);
    }
    /// Disassembles `program` and writes every logged entry to `out`,
    /// one line per entry: index, raw state, instruction index, disassembly.
    pub fn write<W: std::fmt::Write>(
        &self,
        out: &mut W,
        program: &[u8],
    ) -> Result<(), std::fmt::Error> {
        let disassembled = disassembler::to_insn_vec(program);
        // Map each instruction slot (double-wide instructions span two) to
        // its index in the disassembly.
        let table_len = disassembled.last().map(|ins| ins.ptr + 2).unwrap_or(0);
        let mut pc_to_instruction_index = vec![0usize; table_len];
        for (index, ins) in disassembled.iter().enumerate() {
            pc_to_instruction_index[ins.ptr] = index;
            pc_to_instruction_index[ins.ptr + 1] = index;
        }
        for (index, entry) in self.log.iter().enumerate() {
            // entry[11] holds the pc recorded for this step.
            let ins_index = pc_to_instruction_index[entry[11] as usize];
            writeln!(
                out,
                "{:5?} {:016X?} {:5?}: {}",
                index, entry, ins_index, disassembled[ins_index].desc
            )?;
        }
        Ok(())
    }
    /// Compares an interpreter trace against a JIT trace; the JIT trace is
    /// truncated to the interpreter's length before comparison.
    pub fn compare(interpreter: &Self, jit: &Self) -> bool {
        let interpreter_log = interpreter.log.as_slice();
        let common = jit.log.len().min(interpreter_log.len());
        interpreter_log == &jit.log[0..common]
    }
}
macro_rules! translate_memory_access {
($self:ident, $vm_addr:ident, $access_type:expr, $pc:ident, $T:ty) => {
match $self.memory_mapping.map::<UserError>(
$access_type,
$vm_addr,
std::mem::size_of::<$T>() as u64,
) {
Ok(host_addr) => host_addr as *mut $T,
Err(EbpfError::AccessViolation(_pc, access_type, vm_addr, len, regions)) => {
return Err(EbpfError::AccessViolation(
$pc + ebpf::ELF_INSN_DUMP_OFFSET,
access_type,
vm_addr,
len,
regions,
));
}
Err(EbpfError::StackAccessViolation(_pc, access_type, vm_addr, len, stack_frame)) => {
return Err(EbpfError::StackAccessViolation(
$pc + ebpf::ELF_INSN_DUMP_OFFSET,
access_type,
vm_addr,
len,
stack_frame,
));
}
_ => unreachable!(),
}
};
}
/// Number of pointer slots reserved at the head of `syscall_context_objects`
/// before the per-syscall object pointers begin (the head is reused as the
/// `JitProgramArgument` prefix — see `new` and `execute_program_jit`).
pub const SYSCALL_CONTEXT_OBJECTS_OFFSET: usize = 6;
/// A virtual machine instance bound to one executable.
pub struct EbpfVm<'a, E: UserDefinedError, I: InstructionMeter> {
// The program being executed.
executable: &'a dyn Executable<E, I>,
// Text-section bytes of the program.
program: &'a [u8],
// VM address where the text section is mapped.
program_vm_addr: u64,
// Address translation for all load/store instructions.
memory_mapping: MemoryMapping<'a>,
// Per-instruction trace of the last interpreted run.
tracer: Tracer,
// Reserved head slots followed by one raw pointer per syscall slot.
syscall_context_objects: Vec<*mut u8>,
// Owns the boxed syscall objects the raw pointers above point into.
syscall_context_object_pool: Vec<Box<dyn SyscallObject<E> + 'a>>,
// BPF-to-BPF call stack.
frames: CallFrames,
// Instructions executed since the last meter consumption.
last_insn_count: u64,
// Instructions executed by the most recent run.
total_insn_count: u64,
}
impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> {
/// Creates a VM for `executable`.
///
/// The memory map is built from, in order: the caller's `granted_regions`,
/// the call-frame stack, the executable's read-only sections, the writable
/// input `mem` at `MM_INPUT_START`, and the (read-only) text section.
pub fn new(
executable: &'a dyn Executable<E, I>,
mem: &mut [u8],
granted_regions: &[MemoryRegion],
) -> Result<EbpfVm<'a, E, I>, EbpfError<E>> {
let config = executable.get_config();
let const_data_regions: Vec<MemoryRegion> =
if let Ok(sections) = executable.get_ro_sections() {
sections
.iter()
.map(|(addr, slice)| MemoryRegion::new_from_slice(slice, *addr, 0, false))
.collect()
} else {
Vec::new()
};
let mut regions: Vec<MemoryRegion> =
Vec::with_capacity(granted_regions.len() + const_data_regions.len() + 3);
regions.extend(granted_regions.iter().cloned());
let frames = CallFrames::new(config.max_call_depth, config.stack_frame_size);
regions.push(frames.get_region().clone());
regions.extend(const_data_regions);
// Input region is writable.
regions.push(MemoryRegion::new_from_slice(
&mem,
ebpf::MM_INPUT_START,
0,
true,
));
let (program_vm_addr, program) = executable.get_text_bytes()?;
regions.push(MemoryRegion::new_from_slice(
program,
program_vm_addr,
0,
false,
));
let number_of_syscalls = executable.get_syscall_registry().get_number_of_syscalls();
let mut vm = EbpfVm {
executable,
program,
program_vm_addr,
memory_mapping: MemoryMapping::new(regions, &config),
tracer: Tracer::default(),
// Reserved head slots plus one pointer slot per registered syscall.
syscall_context_objects: vec![
std::ptr::null_mut();
SYSCALL_CONTEXT_OBJECTS_OFFSET + number_of_syscalls
],
syscall_context_object_pool: Vec::with_capacity(number_of_syscalls),
frames,
last_insn_count: 0,
total_insn_count: 0,
};
// Copy the MemoryMapping's bytes into the head of `syscall_context_objects`:
// that vec is later reinterpreted as a `JitProgramArgument` (see
// execute_program_jit), which expects the mapping at its start.
// NOTE(review): relies on MemoryMapping fitting inside the reserved
// SYSCALL_CONTEXT_OBJECTS_OFFSET pointer slots — confirm the sizes.
unsafe {
libc::memcpy(
vm.syscall_context_objects.as_mut_ptr() as _,
std::mem::transmute::<_, _>(&vm.memory_mapping),
std::mem::size_of::<MemoryMapping>(),
);
}
Ok(vm)
}
/// Instructions executed by the most recent run (set by
/// `execute_program_interpreted` / `execute_program_jit` when metering is on).
pub fn get_total_instruction_count(&self) -> u64 {
self.total_insn_count
}
/// The program's text-section bytes.
pub fn get_program(&self) -> &[u8] {
&self.program
}
/// The trace recorded during execution (populated when tracing is enabled).
pub fn get_tracer(&self) -> &Tracer {
&self.tracer
}
/// Binds a context object to the slot of its registered syscall.
///
/// With `hash` given, the slot is found by syscall hash; otherwise the
/// object's first vtable method pointer is matched against the registry
/// (see `SyscallRegistry::lookup_context_object_slot`).
pub fn bind_syscall_context_object(
&mut self,
syscall_context_object: Box<dyn SyscallObject<E> + 'a>,
hash: Option<u32>,
) -> Result<(), EbpfError<E>> {
// Reinterpret the trait-object reference as a data-pointer/vtable pair.
// NOTE(review): depends on the compiler's fat-pointer layout matching
// `DynTraitFatPointer` — verify on toolchain upgrades.
let fat_ptr: DynTraitFatPointer = unsafe { std::mem::transmute(&*syscall_context_object) };
let syscall_registry = self.executable.get_syscall_registry();
let slot = match hash {
Some(hash) => {
syscall_registry
.lookup_syscall(hash)
.ok_or(EbpfError::SyscallNotRegistered(hash as usize))?
.context_object_slot
}
None => syscall_registry
.lookup_context_object_slot(fat_ptr.vtable.methods[0] as u64)
.ok_or(EbpfError::SyscallNotRegistered(
fat_ptr.vtable.methods[0] as usize,
))?,
};
// Each slot may be bound at most once.
if !self.syscall_context_objects[SYSCALL_CONTEXT_OBJECTS_OFFSET + slot].is_null() {
Err(EbpfError::SyscallAlreadyBound(slot))
} else {
self.syscall_context_objects[SYSCALL_CONTEXT_OBJECTS_OFFSET + slot] = fat_ptr.data;
// Keep the box alive for the VM's lifetime so the raw pointer stored
// above stays valid.
self.syscall_context_object_pool
.push(syscall_context_object);
Ok(())
}
}
/// Returns the bound context object pointer for a syscall, identified by its
/// function pointer; `None` if no such syscall is registered. The returned
/// pointer is null if the slot exists but was never bound.
pub fn get_syscall_context_object(&self, syscall_function: usize) -> Option<*mut u8> {
self.executable
.get_syscall_registry()
.lookup_context_object_slot(syscall_function as u64)
.map(|slot| self.syscall_context_objects[SYSCALL_CONTEXT_OBJECTS_OFFSET + slot])
}
/// Runs the program in the interpreter and, when metering is enabled,
/// charges the meter and records the total instruction count.
pub fn execute_program_interpreted(&mut self, instruction_meter: &mut I) -> ProgramResult<E> {
    let meter_enabled = self.executable.get_config().enable_instruction_meter;
    let initial_insn_count = if meter_enabled {
        instruction_meter.get_remaining()
    } else {
        0
    };
    let result = self.execute_program_interpreted_inner(instruction_meter);
    if meter_enabled {
        // Charge for instructions executed since the last syscall, then
        // derive the total from the meter's before/after readings.
        instruction_meter.consume(self.last_insn_count);
        self.total_insn_count = initial_insn_count - instruction_meter.get_remaining();
    }
    result
}
#[rustfmt::skip]
/// Interpreter main loop: decodes and executes one instruction per iteration
/// until the outermost EXIT, an error, or the pc runs off the text section.
///
/// Fix: three occurrences of `&reg` had been mangled into the single
/// character `®` (an `&reg;` HTML-entity mojibake), which does not compile;
/// restored to `&reg`. No other code changes.
fn execute_program_interpreted_inner(
&mut self,
instruction_meter: &mut I,
) -> ProgramResult<E> {
const U32MAX: u64 = u32::MAX as u64;
// r10 = stack top; r1 = input region start, but only if it is mapped.
let mut reg: [u64; 11] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.frames.get_stack_top()];
if self.memory_mapping.map::<UserError>(AccessType::Store, ebpf::MM_INPUT_START, 1).is_ok() {
reg[1] = ebpf::MM_INPUT_START;
}
let instruction_meter_enabled = self.executable.get_config().enable_instruction_meter;
let instruction_tracing_enabled = self.executable.get_config().enable_instruction_tracing;
let entry = self.executable.get_entrypoint_instruction_offset()?;
let mut next_pc: usize = entry;
let mut remaining_insn_count = if instruction_meter_enabled { instruction_meter.get_remaining() } else { 0 };
let initial_insn_count = remaining_insn_count;
self.last_insn_count = 0;
while next_pc * ebpf::INSN_SIZE + ebpf::INSN_SIZE <= self.program.len() {
let pc = next_pc;
next_pc += 1;
let insn = ebpf::get_insn_unchecked(self.program, pc);
let dst = insn.dst as usize;
let src = insn.src as usize;
self.last_insn_count += 1;
if instruction_tracing_enabled {
// Snapshot registers r0..r10 plus the pc for the tracer.
let mut state = [0u64; 12];
state[0..11].copy_from_slice(&reg);
state[11] = pc as u64;
self.tracer.trace(state);
}
match insn.opc {
// LD_ABS_*: loads at an absolute offset into the input region.
ebpf::LD_ABS_B => {
let vm_addr = ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64);
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u8);
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_ABS_H => {
let vm_addr = ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64);
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u16);
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_ABS_W => {
let vm_addr = ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64);
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u32);
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_ABS_DW => {
let vm_addr = ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64);
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u64);
reg[0] = unsafe { *host_ptr as u64 };
},
// LD_IND_*: loads at src-register plus immediate offset into the input region.
ebpf::LD_IND_B => {
let vm_addr = ebpf::MM_INPUT_START.wrapping_add(reg[src]).wrapping_add(insn.imm as u32 as u64);
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u8);
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_IND_H => {
let vm_addr = ebpf::MM_INPUT_START.wrapping_add(reg[src]).wrapping_add(insn.imm as u32 as u64);
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u16);
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_IND_W => {
let vm_addr = ebpf::MM_INPUT_START.wrapping_add(reg[src]).wrapping_add(insn.imm as u32 as u64);
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u32);
reg[0] = unsafe { *host_ptr as u64 };
},
ebpf::LD_IND_DW => {
let vm_addr = ebpf::MM_INPUT_START.wrapping_add(reg[src]).wrapping_add(insn.imm as u32 as u64);
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u64);
reg[0] = unsafe { *host_ptr as u64 };
},
// LD_DW_IMM occupies two instruction slots; the high 32 bits come
// from the following slot's imm.
ebpf::LD_DW_IMM => {
let next_insn = ebpf::get_insn(self.program, next_pc);
next_pc += 1;
reg[dst] = (insn.imm as u32) as u64 + ((next_insn.imm as u64) << 32);
},
// LD_*_REG: loads via src register plus signed 16-bit offset.
ebpf::LD_B_REG => {
let vm_addr = (reg[src] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u8);
reg[dst] = unsafe { *host_ptr as u64 };
},
ebpf::LD_H_REG => {
let vm_addr = (reg[src] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u16);
reg[dst] = unsafe { *host_ptr as u64 };
},
ebpf::LD_W_REG => {
let vm_addr = (reg[src] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u32);
reg[dst] = unsafe { *host_ptr as u64 };
},
ebpf::LD_DW_REG => {
let vm_addr = (reg[src] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Load, pc, u64);
reg[dst] = unsafe { *host_ptr as u64 };
},
// ST_*_IMM / ST_*_REG: stores via dst register plus signed offset.
ebpf::ST_B_IMM => {
let vm_addr = (reg[dst] as i64).wrapping_add( insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u8);
unsafe { *host_ptr = insn.imm as u8 };
},
ebpf::ST_H_IMM => {
let vm_addr = (reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u16);
unsafe { *host_ptr = insn.imm as u16 };
},
ebpf::ST_W_IMM => {
let vm_addr = (reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u32);
unsafe { *host_ptr = insn.imm as u32 };
},
ebpf::ST_DW_IMM => {
let vm_addr = (reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u64);
unsafe { *host_ptr = insn.imm as u64 };
},
ebpf::ST_B_REG => {
let vm_addr = (reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u8);
unsafe { *host_ptr = reg[src] as u8 };
},
ebpf::ST_H_REG => {
let vm_addr = (reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u16);
unsafe { *host_ptr = reg[src] as u16 };
},
ebpf::ST_W_REG => {
let vm_addr = (reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u32);
unsafe { *host_ptr = reg[src] as u32 };
},
ebpf::ST_DW_REG => {
let vm_addr = (reg[dst] as i64).wrapping_add(insn.off as i64) as u64;
let host_ptr = translate_memory_access!(self, vm_addr, AccessType::Store, pc, u64);
unsafe { *host_ptr = reg[src] as u64 };
},
// 32-bit ALU operations.
// NOTE(review): DIV32_IMM/MOD32_IMM (and their 64-bit forms) divide
// by the raw immediate; a zero immediate would panic here — presumably
// rejected by the verifier beforehand. Confirm.
ebpf::ADD32_IMM => reg[dst] = (reg[dst] as i32).wrapping_add(insn.imm) as u64,
ebpf::ADD32_REG => reg[dst] = (reg[dst] as i32).wrapping_add(reg[src] as i32) as u64,
ebpf::SUB32_IMM => reg[dst] = (reg[dst] as i32).wrapping_sub(insn.imm) as u64,
ebpf::SUB32_REG => reg[dst] = (reg[dst] as i32).wrapping_sub(reg[src] as i32) as u64,
ebpf::MUL32_IMM => reg[dst] = (reg[dst] as i32).wrapping_mul(insn.imm) as u64,
ebpf::MUL32_REG => reg[dst] = (reg[dst] as i32).wrapping_mul(reg[src] as i32) as u64,
ebpf::DIV32_IMM => reg[dst] = (reg[dst] as u32 / insn.imm as u32) as u64,
ebpf::DIV32_REG => {
if reg[src] as u32 == 0 {
return Err(EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
reg[dst] = (reg[dst] as u32 / reg[src] as u32) as u64;
},
ebpf::OR32_IMM => reg[dst] = (reg[dst] as u32 | insn.imm as u32) as u64,
ebpf::OR32_REG => reg[dst] = (reg[dst] as u32 | reg[src] as u32) as u64,
ebpf::AND32_IMM => reg[dst] = (reg[dst] as u32 & insn.imm as u32) as u64,
ebpf::AND32_REG => reg[dst] = (reg[dst] as u32 & reg[src] as u32) as u64,
ebpf::LSH32_IMM => reg[dst] = (reg[dst] as u32).wrapping_shl(insn.imm as u32) as u64,
ebpf::LSH32_REG => reg[dst] = (reg[dst] as u32).wrapping_shl(reg[src] as u32) as u64,
ebpf::RSH32_IMM => reg[dst] = (reg[dst] as u32).wrapping_shr(insn.imm as u32) as u64,
ebpf::RSH32_REG => reg[dst] = (reg[dst] as u32).wrapping_shr(reg[src] as u32) as u64,
// Mask back to 32 bits after signed ops that sign-extend as i32 -> u64.
ebpf::NEG32 => { reg[dst] = (reg[dst] as i32).wrapping_neg() as u64; reg[dst] &= U32MAX; },
ebpf::MOD32_IMM => reg[dst] = (reg[dst] as u32 % insn.imm as u32) as u64,
ebpf::MOD32_REG => {
if reg[src] as u32 == 0 {
return Err(EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
reg[dst] = (reg[dst] as u32 % reg[src] as u32) as u64;
},
ebpf::XOR32_IMM => reg[dst] = (reg[dst] as u32 ^ insn.imm as u32) as u64,
ebpf::XOR32_REG => reg[dst] = (reg[dst] as u32 ^ reg[src] as u32) as u64,
ebpf::MOV32_IMM => reg[dst] = insn.imm as u32 as u64,
ebpf::MOV32_REG => reg[dst] = (reg[src] as u32) as u64,
ebpf::ARSH32_IMM => { reg[dst] = (reg[dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[dst] &= U32MAX; },
ebpf::ARSH32_REG => { reg[dst] = (reg[dst] as i32).wrapping_shr(reg[src] as u32) as u64; reg[dst] &= U32MAX; },
// Byte-order conversions; imm selects the operand width.
ebpf::LE => {
reg[dst] = match insn.imm {
16 => (reg[dst] as u16).to_le() as u64,
32 => (reg[dst] as u32).to_le() as u64,
64 => reg[dst].to_le(),
_ => {
return Err(EbpfError::InvalidInstruction(pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
};
},
ebpf::BE => {
reg[dst] = match insn.imm {
16 => (reg[dst] as u16).to_be() as u64,
32 => (reg[dst] as u32).to_be() as u64,
64 => reg[dst].to_be(),
_ => {
return Err(EbpfError::InvalidInstruction(pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
};
},
// 64-bit ALU operations.
ebpf::ADD64_IMM => reg[dst] = reg[dst].wrapping_add(insn.imm as u64),
ebpf::ADD64_REG => reg[dst] = reg[dst].wrapping_add(reg[src]),
ebpf::SUB64_IMM => reg[dst] = reg[dst].wrapping_sub(insn.imm as u64),
ebpf::SUB64_REG => reg[dst] = reg[dst].wrapping_sub(reg[src]),
ebpf::MUL64_IMM => reg[dst] = reg[dst].wrapping_mul(insn.imm as u64),
ebpf::MUL64_REG => reg[dst] = reg[dst].wrapping_mul(reg[src]),
ebpf::DIV64_IMM => reg[dst] /= insn.imm as u64,
ebpf::DIV64_REG => {
if reg[src] == 0 {
return Err(EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
reg[dst] /= reg[src];
},
ebpf::OR64_IMM => reg[dst] |= insn.imm as u64,
ebpf::OR64_REG => reg[dst] |= reg[src],
ebpf::AND64_IMM => reg[dst] &= insn.imm as u64,
ebpf::AND64_REG => reg[dst] &= reg[src],
ebpf::LSH64_IMM => reg[dst] = reg[dst].wrapping_shl(insn.imm as u32),
ebpf::LSH64_REG => reg[dst] = reg[dst].wrapping_shl(reg[src] as u32),
ebpf::RSH64_IMM => reg[dst] = reg[dst].wrapping_shr(insn.imm as u32),
ebpf::RSH64_REG => reg[dst] = reg[dst].wrapping_shr(reg[src] as u32),
ebpf::NEG64 => reg[dst] = (reg[dst] as i64).wrapping_neg() as u64,
ebpf::MOD64_IMM => reg[dst] %= insn.imm as u64,
ebpf::MOD64_REG => {
if reg[src] == 0 {
return Err(EbpfError::DivideByZero(pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
reg[dst] %= reg[src];
},
ebpf::XOR64_IMM => reg[dst] ^= insn.imm as u64,
ebpf::XOR64_REG => reg[dst] ^= reg[src],
ebpf::MOV64_IMM => reg[dst] = insn.imm as u64,
ebpf::MOV64_REG => reg[dst] = reg[src],
ebpf::ARSH64_IMM => reg[dst] = (reg[dst] as i64).wrapping_shr(insn.imm as u32) as u64,
ebpf::ARSH64_REG => reg[dst] = (reg[dst] as i64).wrapping_shr(reg[src] as u32) as u64,
// Jumps: the signed offset is relative to the already-incremented next_pc.
ebpf::JA => next_pc = (next_pc as isize + insn.off as isize) as usize,
ebpf::JEQ_IMM => if reg[dst] == insn.imm as u64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JEQ_REG => if reg[dst] == reg[src] { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JGT_IMM => if reg[dst] > insn.imm as u64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JGT_REG => if reg[dst] > reg[src] { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JGE_IMM => if reg[dst] >= insn.imm as u64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JGE_REG => if reg[dst] >= reg[src] { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JLT_IMM => if reg[dst] < insn.imm as u64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JLT_REG => if reg[dst] < reg[src] { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JLE_IMM => if reg[dst] <= insn.imm as u64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JLE_REG => if reg[dst] <= reg[src] { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSET_IMM => if reg[dst] & insn.imm as u64 != 0 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSET_REG => if reg[dst] & reg[src] != 0 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JNE_IMM => if reg[dst] != insn.imm as u64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JNE_REG => if reg[dst] != reg[src] { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSGT_IMM => if reg[dst] as i64 > insn.imm as i64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSGT_REG => if reg[dst] as i64 > reg[src] as i64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSGE_IMM => if reg[dst] as i64 >= insn.imm as i64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSGE_REG => if reg[dst] as i64 >= reg[src] as i64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSLT_IMM => if (reg[dst] as i64) < insn.imm as i64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSLT_REG => if (reg[dst] as i64) < reg[src] as i64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSLE_IMM => if (reg[dst] as i64) <= insn.imm as i64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
ebpf::JSLE_REG => if (reg[dst] as i64) <= reg[src] as i64 { next_pc = (next_pc as isize + insn.off as isize) as usize; },
// Indirect call: imm names the register holding the target address.
// NOTE(review): reg[insn.imm as usize] assumes imm was verified to lie
// within 0..=10; an out-of-range imm would panic here.
ebpf::CALL_REG => {
let target_address = reg[insn.imm as usize];
reg[ebpf::STACK_REG] =
self.frames.push(&reg[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS], next_pc)?;
if target_address < self.program_vm_addr {
return Err(EbpfError::CallOutsideTextSegment(pc + ebpf::ELF_INSN_DUMP_OFFSET, target_address / ebpf::INSN_SIZE as u64 * ebpf::INSN_SIZE as u64));
}
next_pc = self.check_pc(pc, (target_address - self.program_vm_addr) as usize / ebpf::INSN_SIZE)?;
},
// Direct call: either a registered syscall or a BPF-to-BPF call.
ebpf::CALL_IMM => {
if let Some(syscall) = self.executable.get_syscall_registry().lookup_syscall(insn.imm as u32) {
// Settle the meter before handing control to the syscall.
if instruction_meter_enabled {
let _ = instruction_meter.consume(self.last_insn_count);
}
self.last_insn_count = 0;
let mut result: ProgramResult<E> = Ok(0);
(unsafe { std::mem::transmute::<u64, SyscallFunction::<E, *mut u8>>(syscall.function) })(
self.syscall_context_objects[SYSCALL_CONTEXT_OBJECTS_OFFSET + syscall.context_object_slot],
reg[1],
reg[2],
reg[3],
reg[4],
reg[5],
&self.memory_mapping,
&mut result,
);
reg[0] = result?;
if instruction_meter_enabled {
remaining_insn_count = instruction_meter.get_remaining();
}
} else if let Some(target_pc) = self.executable.lookup_bpf_function(insn.imm as u32) {
reg[ebpf::STACK_REG] = self.frames.push(
&reg[ebpf::FIRST_SCRATCH_REG
..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS],
next_pc,
)?;
next_pc = self.check_pc(pc, *target_pc)?;
} else {
self.executable.report_unresolved_symbol(pc)?;
}
}
// EXIT: return from the current frame; on the outermost frame, finish
// with r0 as the program result.
ebpf::EXIT => {
match self.frames.pop::<E>() {
Ok((saved_reg, stack_ptr, ptr)) => {
reg[ebpf::FIRST_SCRATCH_REG
..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS]
.copy_from_slice(&saved_reg);
reg[ebpf::STACK_REG] = stack_ptr;
next_pc = self.check_pc(pc, ptr)?;
}
_ => {
debug!("BPF instructions executed: {:?}", self.last_insn_count);
debug!(
"Max frame depth reached: {:?}",
self.frames.get_max_frame_index()
);
return Ok(reg[0]);
}
}
}
_ => return Err(EbpfError::UnsupportedInstruction(pc + ebpf::ELF_INSN_DUMP_OFFSET)),
}
// Budget check against the count last read from the meter.
if instruction_meter_enabled && self.last_insn_count >= remaining_insn_count {
return Err(EbpfError::ExceededMaxInstructions(pc + 1 + ebpf::ELF_INSN_DUMP_OFFSET, initial_insn_count));
}
}
// The pc ran past the end of the text section without hitting EXIT.
Err(EbpfError::ExecutionOverrun(
next_pc + ebpf::ELF_INSN_DUMP_OFFSET,
))
}
/// Validates that `target_pc` addresses a full instruction inside the text
/// section and returns it unchanged.
///
/// # Errors
/// `CallOutsideTextSegment` when the target is out of bounds.
///
/// Fix: the original passed the error to `ok_or(...)`, whose argument is
/// evaluated eagerly — so `target_pc * ebpf::INSN_SIZE` ran unchecked on
/// every call and would panic in debug builds exactly in the overflow case
/// that `checked_mul` is meant to catch. The error is now built lazily via
/// `ok_or_else`, with wrapping arithmetic in the (diagnostic-only) payload.
fn check_pc(&self, current_pc: usize, target_pc: usize) -> Result<usize, EbpfError<E>> {
    let make_error = || {
        EbpfError::CallOutsideTextSegment(
            current_pc + ebpf::ELF_INSN_DUMP_OFFSET,
            // Diagnostic address only; wraps instead of panicking on overflow.
            self.program_vm_addr
                .wrapping_add(target_pc.wrapping_mul(ebpf::INSN_SIZE) as u64),
        )
    };
    let offset = target_pc
        .checked_mul(ebpf::INSN_SIZE)
        .ok_or_else(make_error)?;
    let end = offset.checked_add(ebpf::INSN_SIZE).ok_or_else(make_error)?;
    // Bounds check only; the slice itself is unused.
    self.program.get(offset..end).ok_or_else(make_error)?;
    Ok(target_pc)
}
/// Runs the JIT-compiled form of the program.
///
/// # Errors
/// `JitNotCompiled` if `jit_compile` was never run on the executable;
/// otherwise whatever the compiled program reports through `result`.
pub fn execute_program_jit(&mut self, instruction_meter: &mut I) -> ProgramResult<E> {
// r1 points at the input region only when it is actually mapped.
let reg1 = if self
.memory_mapping
.map::<UserError>(AccessType::Store, ebpf::MM_INPUT_START, 1)
.is_ok()
{
ebpf::MM_INPUT_START
} else {
0
};
let initial_insn_count = if self.executable.get_config().enable_instruction_meter {
instruction_meter.get_remaining()
} else {
0
};
let result: ProgramResult<E> = Ok(0);
let compiled_program = self
.executable
.get_compiled_program()
.ok_or(EbpfError::JitNotCompiled)?;
unsafe {
// Slot OFFSET-1 carries the tracer pointer into the compiled code; the
// vec as a whole is reinterpreted as the JitProgramArgument (the
// memory mapping was copied into its head in `new`).
self.syscall_context_objects[SYSCALL_CONTEXT_OBJECTS_OFFSET - 1] =
&mut self.tracer as *mut _ as *mut u8;
// NOTE(review): the return value is clamped at zero before the cast —
// presumably negative values are an error sentinel; confirm against
// jit::JitProgram.
self.last_insn_count = (compiled_program.main)(
&result,
reg1,
&*(self.syscall_context_objects.as_ptr() as *const JitProgramArgument),
instruction_meter,
)
.max(0) as u64;
}
if self.executable.get_config().enable_instruction_meter {
// Charge the meter for the compiled run and derive the total executed
// count from the meter's before/after readings.
let remaining_insn_count = instruction_meter.get_remaining();
self.total_insn_count = remaining_insn_count - self.last_insn_count;
instruction_meter.consume(self.total_insn_count);
self.total_insn_count += initial_insn_count - remaining_insn_count;
}
match result {
// Normalize the reported count to the initial budget.
Err(EbpfError::ExceededMaxInstructions(pc, _)) => {
Err(EbpfError::ExceededMaxInstructions(pc, initial_insn_count))
}
x => x,
}
}
}