#![allow(clippy::deprecated_cfg_attr)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unreachable_code)]
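//! Just-in-time compiler for eBPF programs: translates verified BPF bytecode
//! into x86-64 machine code. The output is a `pc_section` lookup table
//! (BPF instruction index to host address) followed by a `text_section`
//! containing the generated code, shared helper routines, exception handlers,
//! and the epilogue.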
extern crate libc;
use std::fmt::Debug;
use std::mem;
use std::collections::HashMap;
use std::fmt::Formatter;
use std::fmt::Error as FormatterError;
use std::ops::{Index, IndexMut};
use crate::{
vm::{Config, Executable, ProgramResult, InstructionMeter, Tracer, DynTraitFatPointer, SYSCALL_CONTEXT_OBJECTS_OFFSET},
ebpf::{self, INSN_SIZE, FIRST_SCRATCH_REG, SCRATCH_REGS, STACK_REG, MM_STACK_START},
error::{UserDefinedError, EbpfError},
memory_region::{AccessType, MemoryMapping},
user_error::UserError,
};
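/// Argument block handed to the generated entry point. The zero-length array
/// marks the start of a variable number of syscall context object pointers
/// that follow the memory mapping in memory.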
pub struct JitProgramArgument<'a> {
pub memory_mapping: MemoryMapping<'a>,
pub syscall_context_objects: [*const u8; 0],
}
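/// One page-aligned allocation backing a compiled program: the pc-to-host
/// lookup table, immediately followed by the machine code.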
struct JitProgramSections {
pc_section: &'static mut [u64],
text_section: &'static mut [u8],
}
impl JitProgramSections {
fn new(pc: usize, code_size: usize) -> Self {
let _pc_loc_table_size = round_to_page_size(pc * 8);
let _code_size = round_to_page_size(code_size);
#[cfg(windows)]
{
Self {
pc_section: &mut [],
text_section: &mut [],
}
}
#[cfg(not(windows))]
unsafe {
// Null is fine as the initial value: posix_memalign() overwrites it on success.
let mut raw: *mut libc::c_void = std::ptr::null_mut();
let result = libc::posix_memalign(&mut raw, PAGE_SIZE, _pc_loc_table_size + _code_size);
debug_assert_eq!(result, 0, "posix_memalign() failed");
std::ptr::write_bytes(raw, 0x00, _pc_loc_table_size);
std::ptr::write_bytes(raw.add(_pc_loc_table_size), 0xcc, _code_size);
Self {
pc_section: std::slice::from_raw_parts_mut(raw as *mut u64, pc),
text_section: std::slice::from_raw_parts_mut(raw.add(_pc_loc_table_size) as *mut u8, _code_size),
}
}
}
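/// Locks the sections after code generation: the lookup table becomes
/// read-only and the machine code becomes read-execute.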
fn seal(&mut self) {
#[cfg(not(windows))]
if !self.pc_section.is_empty() {
unsafe {
libc::mprotect(self.pc_section.as_mut_ptr() as *mut _, round_to_page_size(self.pc_section.len() * mem::size_of::<u64>()), libc::PROT_READ); // mprotect() takes a length in bytes, not table entries
libc::mprotect(self.text_section.as_mut_ptr() as *mut _, round_to_page_size(self.text_section.len()), libc::PROT_EXEC | libc::PROT_READ);
}
}
}
}
impl Drop for JitProgramSections {
fn drop(&mut self) {
#[cfg(not(windows))]
if !self.pc_section.is_empty() {
unsafe {
libc::mprotect(self.pc_section.as_mut_ptr() as *mut _, round_to_page_size(self.pc_section.len() * mem::size_of::<u64>()), libc::PROT_READ | libc::PROT_WRITE);
libc::mprotect(self.text_section.as_mut_ptr() as *mut _, round_to_page_size(self.text_section.len()), libc::PROT_READ | libc::PROT_WRITE);
libc::free(self.pc_section.as_ptr() as *mut _);
}
}
}
}
pub struct JitProgram<E: UserDefinedError, I: InstructionMeter> {
_sections: JitProgramSections,
pub main: unsafe fn(&ProgramResult<E>, u64, &JitProgramArgument, &mut I) -> i64,
}
impl<E: UserDefinedError, I: InstructionMeter> Debug for JitProgram<E, I> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.write_fmt(format_args!("JitProgram {:?}", &self.main as *const _))
}
}
impl<E: UserDefinedError, I: InstructionMeter> PartialEq for JitProgram<E, I> {
fn eq(&self, other: &Self) -> bool {
std::ptr::eq(self.main as *const u8, other.main as *const u8)
}
}
impl<E: UserDefinedError, I: InstructionMeter> JitProgram<E, I> {
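/// Compiles `executable` and reinterprets the start of the resulting text
/// section as the native entry point.
///
/// A minimal usage sketch (the `executable` value and the meter type are
/// assumed to be set up elsewhere; illustrative only):
///
/// ```ignore
/// let jit = JitProgram::<UserError, TestInstructionMeter>::new(executable)?;
/// ```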
pub fn new(executable: &dyn Executable<E, I>) -> Result<Self, EbpfError<E>> {
let program = executable.get_text_bytes()?.1;
let mut jit = JitCompiler::new(program, executable.get_config());
jit.compile::<E, I>(executable)?;
let main = unsafe { mem::transmute(jit.result.text_section.as_ptr()) };
Ok(Self {
_sections: jit.result,
main,
})
}
}
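// Special `target_pc` values for the shared handler routines emitted after
// the program. They sit at the very top of the address space so they can
// never collide with a real instruction index.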
const TARGET_PC_TRACE: usize = std::usize::MAX - 13;
const TARGET_PC_TRANSLATE_PC: usize = std::usize::MAX - 12;
const TARGET_PC_TRANSLATE_PC_LOOP: usize = std::usize::MAX - 11;
const TARGET_PC_CALL_EXCEEDED_MAX_INSTRUCTIONS: usize = std::usize::MAX - 10;
const TARGET_PC_CALL_DEPTH_EXCEEDED: usize = std::usize::MAX - 9;
const TARGET_PC_CALL_OUTSIDE_TEXT_SEGMENT: usize = std::usize::MAX - 8;
const TARGET_PC_CALLX_UNSUPPORTED_INSTRUCTION: usize = std::usize::MAX - 7;
const TARGET_PC_CALL_UNSUPPORTED_INSTRUCTION: usize = std::usize::MAX - 6;
const TARGET_PC_DIV_BY_ZERO: usize = std::usize::MAX - 5;
const TARGET_PC_EXCEPTION_AT: usize = std::usize::MAX - 4;
const TARGET_PC_SYSCALL_EXCEPTION: usize = std::usize::MAX - 3;
const TARGET_PC_EXIT: usize = std::usize::MAX - 2;
const TARGET_PC_EPILOGUE: usize = std::usize::MAX - 1;
#[derive(Copy, Clone)]
enum OperandSize {
S8 = 8,
S16 = 16,
S32 = 32,
S64 = 64,
}
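// x86-64 general purpose registers, by hardware encoding. Values 8..15
// require the corresponding REX prefix extension bit.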
const RAX: u8 = 0;
const RCX: u8 = 1;
const RDX: u8 = 2;
const RBX: u8 = 3;
const RSP: u8 = 4;
const RBP: u8 = 5;
const RSI: u8 = 6;
const RDI: u8 = 7;
const R8: u8 = 8;
const R9: u8 = 9;
const R10: u8 = 10;
const R11: u8 = 11;
const R12: u8 = 12;
const R13: u8 = 13;
const R14: u8 = 14;
const R15: u8 = 15;
const ARGUMENT_REGISTERS: [u8; 6] = [
RDI, RSI, RDX, RCX, R8, R9
];
const CALLER_SAVED_REGISTERS: [u8; 9] = [
RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11
];
const CALLEE_SAVED_REGISTERS: [u8; 6] = [
RBP, RBX, R12, R13, R14, R15
];
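// Maps BPF registers r0..r10 onto x86-64 registers. R10 (which holds the
// JitProgramArgument pointer) and R11 (scratch) are deliberately left
// unmapped; RDI carries the instruction meter and RBP anchors the spill slots.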
const REGISTER_MAP: [u8; 11] = [
RAX,
ARGUMENT_REGISTERS[1],
ARGUMENT_REGISTERS[2],
ARGUMENT_REGISTERS[3],
ARGUMENT_REGISTERS[4],
ARGUMENT_REGISTERS[5],
CALLEE_SAVED_REGISTERS[2],
CALLEE_SAVED_REGISTERS[3],
CALLEE_SAVED_REGISTERS[4],
CALLEE_SAVED_REGISTERS[5],
RBX,
];
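/// Appends `data` verbatim to the text section, failing with
/// `ExhausedTextSegment` once the preallocated buffer would overflow.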
#[inline]
fn emit<T, E: UserDefinedError>(jit: &mut JitCompiler, data: T) -> Result<(), EbpfError<E>> {
let size = mem::size_of::<T>() as usize;
if jit.offset_in_text_section + size > jit.result.text_section.len() {
return Err(EbpfError::ExhausedTextSegment(jit.pc));
}
unsafe {
#[allow(clippy::cast_ptr_alignment)]
let ptr = jit.result.text_section.as_ptr().add(jit.offset_in_text_section) as *mut T;
// The destination is not necessarily aligned for multi-byte operands, so use
// write_unaligned() instead of a plain dereference (which would be UB here).
std::ptr::write_unaligned(ptr, data);
}
jit.offset_in_text_section += size;
Ok(())
}
#[allow(dead_code)]
#[inline]
fn emit_debugger_trap<E: UserDefinedError>(jit: &mut JitCompiler) -> Result<(), EbpfError<E>> {
emit::<u8, E>(jit, 0xcc)
}
#[inline]
fn emit_modrm<E: UserDefinedError>(jit: &mut JitCompiler, modrm: u8, r: u8, m: u8) -> Result<(), EbpfError<E>> {
debug_assert_eq!((modrm | 0xc0), 0xc0);
emit::<u8, E>(jit, (modrm & 0xc0) | ((r & 0b111) << 3) | (m & 0b111))
}
#[inline]
fn emit_modrm_reg2reg<E: UserDefinedError>(jit: &mut JitCompiler, r: u8, m: u8) -> Result<(), EbpfError<E>> {
emit_modrm(jit, 0xc0, r, m)
}
#[inline]
fn emit_sib<E: UserDefinedError>(jit: &mut JitCompiler, scale: u8, index: u8, base: u8) -> Result<(), EbpfError<E>> {
debug_assert_eq!((scale | 0xc0), 0xc0);
emit::<u8, E>(jit, (scale & 0xc0) | ((index & 0b111) << 3) | (base & 0b111))
}
#[inline]
fn emit_modrm_and_displacement<E: UserDefinedError>(jit: &mut JitCompiler, r: u8, m: u8, d: i32) -> Result<(), EbpfError<E>> {
if d == 0 && (m & 0b111) != RBP {
emit_modrm(jit, 0x00, r, m)?;
if (m & 0b111) == RSP {
emit_sib(jit, 0, m, m)?;
}
} else if d >= -128 && d <= 127 {
emit_modrm(jit, 0x40, r, m)?;
if (m & 0b111) == RSP {
emit_sib(jit, 0, m, m)?;
}
emit::<u8, E>(jit, d as u8)?;
} else {
emit_modrm(jit, 0x80, r, m)?;
if (m & 0b111) == RSP {
emit_sib(jit, 0, m, m)?;
}
emit::<u32, E>(jit, d as u32)?;
}
Ok(())
}
#[inline]
fn emit_rex<E: UserDefinedError>(jit: &mut JitCompiler, w: u8, r: u8, x: u8, b: u8) -> Result<(), EbpfError<E>> {
debug_assert_eq!((w | 1), 1);
debug_assert_eq!((r | 1), 1);
debug_assert_eq!((x | 1), 1);
debug_assert_eq!((b | 1), 1);
emit::<u8, E>(jit, 0x40 | (w << 3) | (r << 2) | (x << 1) | b)
}
#[inline]
fn emit_basic_rex<E: UserDefinedError>(jit: &mut JitCompiler, w: u8, src: u8, dst: u8) -> Result<(), EbpfError<E>> {
let is_masked = | val, mask | if val & mask == 0 { 0 } else { 1 };
let src_masked = is_masked(src, 0b1000);
let dst_masked = is_masked(dst, 0b1000);
if w != 0 || src_masked != 0 || dst_masked != 0 {
emit_rex(jit, w, src_masked, 0, dst_masked)?;
}
Ok(())
}
#[inline]
fn emit_push<E: UserDefinedError>(jit: &mut JitCompiler, r: u8) -> Result<(), EbpfError<E>> {
emit_basic_rex(jit, 0, 0, r)?;
emit::<u8, E>(jit, 0x50 | (r & 0b111))
}
#[inline]
fn emit_pop<E: UserDefinedError>(jit: &mut JitCompiler, r: u8) -> Result<(), EbpfError<E>> {
emit_basic_rex(jit, 0, 0, r)?;
emit::<u8, E>(jit, 0x58 | (r & 0b111))
}
#[derive(PartialEq, Copy, Clone)]
enum OperationWidth {
Bit32 = 0,
Bit64 = 1,
}
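/// Emits an ALU instruction with primary opcode `op`. For /digit encodings
/// `src` carries the opcode extension, and an immediate is appended for the
/// opcode forms that take one (0xc1: imm8; 0x81, 0xc7 and 0xf7 /0: imm32).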
#[inline]
fn emit_alu<E: UserDefinedError>(jit: &mut JitCompiler, width: OperationWidth, op: u8, src: u8, dst: u8, imm: i32, displacement: Option<i32>) -> Result<(), EbpfError<E>> {
emit_basic_rex(jit, width as u8, src, dst)?;
emit::<u8, E>(jit, op)?;
match displacement {
Some(d) => {
emit_modrm_and_displacement(jit, src, dst, d)?;
},
None => {
emit_modrm_reg2reg(jit, src, dst)?;
}
}
match op {
0xc1 => emit::<u8, E>(jit, imm as u8)?,
0x81 | 0xc7 => emit::<u32, E>(jit, imm as u32)?,
0xf7 if src == 0 => emit::<u32, E>(jit, imm as u32)?,
_ => {}
}
Ok(())
}
#[inline]
fn emit_mov<E: UserDefinedError>(jit: &mut JitCompiler, width: OperationWidth, src: u8, dst: u8) -> Result<(), EbpfError<E>> {
emit_alu(jit, width, 0x89, src, dst, 0, None)
}
#[inline]
fn sign_extend_i32_to_i64<E: UserDefinedError>(jit: &mut JitCompiler, src: u8, dst: u8) -> Result<(), EbpfError<E>> {
emit_alu(jit, OperationWidth::Bit64, 0x63, src, dst, 0, None)
}
#[inline]
fn emit_xchg<E: UserDefinedError>(jit: &mut JitCompiler, src: u8, dst: u8) -> Result<(), EbpfError<E>> {
emit_alu(jit, OperationWidth::Bit64, 0x87, src, dst, 0, None)
}
#[inline]
fn emit_cmp_imm32<E: UserDefinedError>(jit: &mut JitCompiler, dst: u8, imm: i32, displacement: Option<i32>) -> Result<(), EbpfError<E>> {
emit_alu(jit, OperationWidth::Bit64, 0x81, 7, dst, imm, displacement)
}
#[inline]
fn emit_cmp<E: UserDefinedError>(jit: &mut JitCompiler, src: u8, dst: u8, displacement: Option<i32>) -> Result<(), EbpfError<E>> {
emit_alu(jit, OperationWidth::Bit64, 0x39, src, dst, 0, displacement)
}
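// Branch targets are generally unknown while emitting, so every jump writes
// a 32 bit placeholder and records itself in `text_section_jumps`;
// `resolve_jumps()` patches the placeholders once all offsets are known.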
#[inline]
fn emit_jump_offset<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: usize) -> Result<(), EbpfError<E>> {
jit.text_section_jumps.push(Jump { location: jit.offset_in_text_section, target_pc });
emit::<u32, E>(jit, 0)
}
#[inline]
fn emit_jcc<E: UserDefinedError>(jit: &mut JitCompiler, code: u8, target_pc: usize) -> Result<(), EbpfError<E>> {
emit::<u8, E>(jit, 0x0f)?;
emit::<u8, E>(jit, code)?;
emit_jump_offset(jit, target_pc)
}
#[inline]
fn emit_jmp<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: usize) -> Result<(), EbpfError<E>> {
emit::<u8, E>(jit, 0xe9)?;
emit_jump_offset(jit, target_pc)
}
#[inline]
fn emit_call<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: usize) -> Result<(), EbpfError<E>> {
emit::<u8, E>(jit, 0xe8)?;
emit_jump_offset(jit, target_pc)
}
#[inline]
fn set_anchor(jit: &mut JitCompiler, target: usize) {
jit.handler_anchors.insert(target, jit.offset_in_text_section);
}
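/// Emits a load from [src + offset] into `dst`; byte and word operands are
/// zero-extended (movzx).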
#[inline]
fn emit_load<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, src: u8, dst: u8, offset: i32) -> Result<(), EbpfError<E>> {
let rexw = match size {
OperandSize::S64 => 1,
_ => 0
};
emit_basic_rex(jit, rexw, dst, src)?;
match size {
OperandSize::S8 => {
emit::<u8, E>(jit, 0x0f)?;
emit::<u8, E>(jit, 0xb6)?;
},
OperandSize::S16 => {
emit::<u8, E>(jit, 0x0f)?;
emit::<u8, E>(jit, 0xb7)?;
},
OperandSize::S32 | OperandSize::S64 => {
emit::<u8, E>(jit, 0x8b)?;
}
}
emit_modrm_and_displacement(jit, dst, src, offset)
}
#[inline]
fn emit_load_imm<E: UserDefinedError>(jit: &mut JitCompiler, dst: u8, imm: i64) -> Result<(), EbpfError<E>> {
if imm >= std::i32::MIN as i64 && imm <= std::i32::MAX as i64 {
emit_alu(jit, OperationWidth::Bit64, 0xc7, 0, dst, imm as i32, None)
} else {
emit_basic_rex(jit, 1, 0, dst)?;
emit::<u8, E>(jit, 0xb8 | (dst & 0b111))?;
emit::<u64, E>(jit, imm as u64)
}
}
#[allow(dead_code)]
#[inline]
fn emit_leaq<E: UserDefinedError>(jit: &mut JitCompiler, src: u8, dst: u8, offset: i32) -> Result<(), EbpfError<E>> {
emit_basic_rex(jit, 1, dst, src)?;
emit::<u8, E>(jit, 0x8d)?;
emit_modrm_and_displacement(jit, dst, src, offset)
}
#[inline]
fn emit_store<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, src: u8, dst: u8, offset: i32) -> Result<(), EbpfError<E>> {
if let OperandSize::S16 = size {
emit::<u8, E>(jit, 0x66)?;
}
let (is_s8, is_u64, rexw) = match size {
OperandSize::S8 => (true, false, 0),
OperandSize::S64 => (false, true, 1),
_ => (false, false, 0),
};
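// Byte stores always get a REX prefix so that encodings 4..7 select
// SPL/BPL/SIL/DIL instead of AH/CH/DH/BH.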
if is_u64 || (src & 0b1000) != 0 || (dst & 0b1000) != 0 || is_s8 {
let is_masked = | val, mask | {
match val & mask {
0 => 0,
_ => 1
}
};
emit_rex(jit, rexw, is_masked(src, 8), 0, is_masked(dst, 8))?;
}
match size {
OperandSize::S8 => emit::<u8, E>(jit, 0x88)?,
_ => emit::<u8, E>(jit, 0x89)?,
};
emit_modrm_and_displacement(jit, src, dst, offset)
}
#[inline]
fn emit_store_imm32<E: UserDefinedError>(jit: &mut JitCompiler, size: OperandSize, dst: u8, offset: i32, imm: i32) -> Result<(), EbpfError<E>> {
if let OperandSize::S16 = size {
emit::<u8, E>(jit, 0x66)?;
}
match size {
OperandSize::S64 => emit_basic_rex(jit, 1, 0, dst)?,
_ => emit_basic_rex(jit, 0, 0, dst)?,
};
match size {
OperandSize::S8 => emit::<u8, E>(jit, 0xc6)?,
_ => emit::<u8, E>(jit, 0xc7)?,
};
emit_modrm_and_displacement(jit, 0, dst, offset)?;
match size {
OperandSize::S8 => emit::<u8, E>(jit, imm as u8),
OperandSize::S16 => emit::<u16, E>(jit, imm as u16),
_ => emit::<u32, E>(jit, imm as u32),
}
}
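// Instruction meter bookkeeping: RDI (ARGUMENT_REGISTERS[0]) tracks the
// remaining budget relative to the current instruction index. Taken branches
// add the pc delta to keep it consistent, and the fall-through path undoes
// that adjustment again.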
#[inline]
fn emit_profile_instruction_count<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: Option<usize>) -> Result<(), EbpfError<E>> {
if jit.config.enable_instruction_meter {
match target_pc {
Some(target_pc) => {
emit_alu(jit, OperationWidth::Bit64, 0x81, 0, ARGUMENT_REGISTERS[0], target_pc as i32 - jit.pc as i32 - 1, None)?;
},
None => {
emit_pop(jit, R11)?;
emit_alu(jit, OperationWidth::Bit64, 0x81, 5, ARGUMENT_REGISTERS[0], jit.pc as i32 + 1, None)?;
emit_alu(jit, OperationWidth::Bit64, 0x01, R11, ARGUMENT_REGISTERS[0], jit.pc as i32, None)?;
},
}
}
Ok(())
}
#[inline]
fn emit_validate_and_profile_instruction_count<E: UserDefinedError>(jit: &mut JitCompiler, exclusive: bool, target_pc: Option<usize>) -> Result<(), EbpfError<E>> {
if jit.config.enable_instruction_meter {
emit_cmp_imm32(jit, ARGUMENT_REGISTERS[0], jit.pc as i32 + 1, None)?;
emit_jcc(jit, if exclusive { 0x82 } else { 0x86 }, TARGET_PC_CALL_EXCEEDED_MAX_INSTRUCTIONS)?;
emit_profile_instruction_count(jit, target_pc)?;
}
Ok(())
}
#[inline]
fn emit_undo_profile_instruction_count<E: UserDefinedError>(jit: &mut JitCompiler, target_pc: usize) -> Result<(), EbpfError<E>> {
if jit.config.enable_instruction_meter {
emit_alu(jit, OperationWidth::Bit64, 0x81, 0, ARGUMENT_REGISTERS[0], jit.pc as i32 + 1 - target_pc as i32, None)?;
}
Ok(())
}
#[inline]
fn emit_profile_instruction_count_of_exception<E: UserDefinedError>(jit: &mut JitCompiler) -> Result<(), EbpfError<E>> {
emit_alu(jit, OperationWidth::Bit64, 0x81, 0, R11, 1, None)?;
if jit.config.enable_instruction_meter {
emit_alu(jit, OperationWidth::Bit64, 0x29, R11, ARGUMENT_REGISTERS[0], 0, None)?;
}
Ok(())
}
#[inline]
fn emit_conditional_branch_reg<E: UserDefinedError>(jit: &mut JitCompiler, op: u8, src: u8, dst: u8, target_pc: usize) -> Result<(), EbpfError<E>> {
emit_validate_and_profile_instruction_count(jit, false, Some(target_pc))?;
emit_cmp(jit, src, dst, None)?;
emit_jcc(jit, op, target_pc)?;
emit_undo_profile_instruction_count(jit, target_pc)
}
#[inline]
fn emit_conditional_branch_imm<E: UserDefinedError>(jit: &mut JitCompiler, op: u8, imm: i32, dst: u8, target_pc: usize) -> Result<(), EbpfError<E>> {
emit_validate_and_profile_instruction_count(jit, false, Some(target_pc))?;
emit_cmp_imm32(jit, dst, imm, None)?;
emit_jcc(jit, op, target_pc)?;
emit_undo_profile_instruction_count(jit, target_pc)
}
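/// Operand sources accepted by the call-emission helpers: a register, a
/// memory operand at [register + offset], a register plus an immediate
/// constant, or a plain constant.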
enum Value {
Register(u8),
RegisterIndirect(u8, i32),
RegisterPlusConstant64(u8, i64),
Constant64(i64),
}
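/// Emits a BPF-to-BPF call: spills the scratch registers, bounds-checks and
/// translates the target address for register-indirect calls, advances the
/// BPF stack pointer to the next frame, and dispatches either through a
/// register or directly to a known target pc.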
#[inline]
fn emit_bpf_call<E: UserDefinedError>(jit: &mut JitCompiler, dst: Value, number_of_instructions: usize) -> Result<(), EbpfError<E>> {
for reg in REGISTER_MAP.iter().skip(FIRST_SCRATCH_REG).take(SCRATCH_REGS) {
emit_push(jit, *reg)?;
}
emit_push(jit, REGISTER_MAP[STACK_REG])?;
match dst {
Value::Register(reg) => {
emit_push(jit, REGISTER_MAP[0])?;
if reg != REGISTER_MAP[0] {
emit_mov(jit, OperationWidth::Bit64, reg, REGISTER_MAP[0])?;
}
emit_alu(jit, OperationWidth::Bit64, 0x81, 4, REGISTER_MAP[0], !(INSN_SIZE as i32 - 1), None)?;
emit_load_imm(jit, R11, jit.pc as i64)?;
emit_load_imm(jit, REGISTER_MAP[STACK_REG], jit.program_vm_addr as i64 + (number_of_instructions * INSN_SIZE) as i64)?;
emit_cmp(jit, REGISTER_MAP[STACK_REG], REGISTER_MAP[0], None)?;
emit_jcc(jit, 0x83, TARGET_PC_CALL_OUTSIDE_TEXT_SEGMENT)?;
emit_load_imm(jit, REGISTER_MAP[STACK_REG], jit.program_vm_addr as i64)?;
emit_cmp(jit, REGISTER_MAP[STACK_REG], REGISTER_MAP[0], None)?;
emit_jcc(jit, 0x82, TARGET_PC_CALL_OUTSIDE_TEXT_SEGMENT)?;
emit_alu(jit, OperationWidth::Bit64, 0x29, REGISTER_MAP[STACK_REG], REGISTER_MAP[0], 0, None)?;
if jit.config.enable_instruction_meter {
let shift_amount = INSN_SIZE.trailing_zeros();
debug_assert_eq!(INSN_SIZE, 1<<shift_amount);
emit_mov(jit, OperationWidth::Bit64, REGISTER_MAP[0], REGISTER_MAP[STACK_REG])?;
emit_alu(jit, OperationWidth::Bit64, 0xc1, 5, REGISTER_MAP[STACK_REG], shift_amount as i32, None)?;
emit_push(jit, REGISTER_MAP[STACK_REG])?;
}
debug_assert_eq!(INSN_SIZE, 8);
emit_mov(jit, OperationWidth::Bit64, REGISTER_MAP[0], REGISTER_MAP[STACK_REG])?;
emit_load_imm(jit, REGISTER_MAP[STACK_REG], jit.result.pc_section.as_ptr() as i64)?;
emit_alu(jit, OperationWidth::Bit64, 0x01, REGISTER_MAP[STACK_REG], REGISTER_MAP[0], 0, None)?;
emit_load(jit, OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[0], 0)?;
},
Value::Constant64(_target_pc) => {},
_ => {
#[cfg(debug_assertions)]
unreachable!();
}
}
emit_load(jit, OperandSize::S64, RBP, REGISTER_MAP[STACK_REG], -8 * CALLEE_SAVED_REGISTERS.len() as i32)?;
emit_alu(jit, OperationWidth::Bit64, 0x81, 4, REGISTER_MAP[STACK_REG], !(jit.config.stack_frame_size as i32 * 2 - 1), None)?;
emit_alu(jit, OperationWidth::Bit64, 0x81, 0, REGISTER_MAP[STACK_REG], jit.config.stack_frame_size as i32 * 3, None)?;
emit_store(jit, OperandSize::S64, REGISTER_MAP[STACK_REG], RBP, -8 * CALLEE_SAVED_REGISTERS.len() as i32)?;
emit_load_imm(jit, R11, MM_STACK_START as i64 + (jit.config.max_call_depth * jit.config.stack_frame_size * 2) as i64)?;
emit_cmp(jit, R11, REGISTER_MAP[STACK_REG], None)?;
emit_load_imm(jit, R11, jit.pc as i64)?;
emit_jcc(jit, 0x83, TARGET_PC_CALL_DEPTH_EXCEEDED)?;
match dst {
Value::Register(_reg) => {
emit_validate_and_profile_instruction_count(jit, false, None)?;
emit_mov(jit, OperationWidth::Bit64, REGISTER_MAP[0], R11)?;
emit_pop(jit, REGISTER_MAP[0])?;
emit::<u8, E>(jit, 0x41)?;
emit::<u8, E>(jit, 0xff)?;
emit::<u8, E>(jit, 0xd3)?;
},
Value::Constant64(target_pc) => {
emit_validate_and_profile_instruction_count(jit, false, Some(target_pc as usize))?;
emit_load_imm(jit, R11, target_pc as i64)?;
emit_call(jit, target_pc as usize)?;
},
_ => {
#[cfg(debug_assertions)]
unreachable!();
}
}
emit_undo_profile_instruction_count(jit, 0)?;
emit_pop(jit, REGISTER_MAP[STACK_REG])?;
for reg in REGISTER_MAP.iter().skip(FIRST_SCRATCH_REG).take(SCRATCH_REGS).rev() {
emit_pop(jit, *reg)?;
}
Ok(())
}
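/// An argument value paired with its position in the System V AMD64 calling
/// convention; `emit_rust_call()` below uses this to marshal values into the
/// correct argument registers while preserving caller-saved state.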
struct Argument {
index: usize,
value: Value,
}
#[inline]
fn emit_rust_call<E: UserDefinedError>(jit: &mut JitCompiler, function: *const u8, arguments: &[Argument], return_reg: Option<u8>, check_exception: bool) -> Result<(), EbpfError<E>> {
let mut saved_registers = CALLER_SAVED_REGISTERS.to_vec();
if let Some(reg) = return_reg {
let dst = saved_registers.iter().position(|x| *x == reg);
debug_assert!(dst.is_some());
if let Some(dst) = dst {
saved_registers.remove(dst);
}
}
for argument in arguments {
if argument.index < ARGUMENT_REGISTERS.len() {
continue;
}
match argument.value {
Value::Register(reg) => {
let src = saved_registers.iter().position(|x| *x == reg);
debug_assert!(src.is_some());
if let Some(src) = src {
saved_registers.remove(src);
}
let dst = saved_registers.len() - (argument.index - ARGUMENT_REGISTERS.len());
saved_registers.insert(dst, reg);
},
Value::RegisterIndirect(reg, offset) => {
emit_load(jit, OperandSize::S64, reg, R11, offset)?;
},
_ => {
#[cfg(debug_assertions)]
unreachable!();
}
}
}
for reg in saved_registers.iter() {
emit_push(jit, *reg)?;
}
for argument in arguments {
if argument.index >= ARGUMENT_REGISTERS.len() {
continue;
}
let dst = ARGUMENT_REGISTERS[argument.index];
match argument.value {
Value::Register(reg) => {
if reg != dst {
emit_mov(jit, OperationWidth::Bit64, reg, dst)?;
}
},
Value::RegisterIndirect(reg, offset) => {
emit_load(jit, OperandSize::S64, reg, dst, offset)?;
},
Value::RegisterPlusConstant64(reg, offset) => {
emit_load_imm(jit, R11, offset)?;
emit_alu(jit, OperationWidth::Bit64, 0x01, reg, R11, 0, None)?;
emit_mov(jit, OperationWidth::Bit64, R11, dst)?;
},
Value::Constant64(value) => {
emit_load_imm(jit, dst, value)?;
},
}
}
emit_load_imm(jit, RAX, function as i64)?;
emit::<u8, E>(jit, 0xff)?;
emit::<u8, E>(jit, 0xd0)?;
if let Some(reg) = return_reg {
emit_mov(jit, OperationWidth::Bit64, RAX, reg)?;
}
for reg in saved_registers.iter().rev() {
emit_pop(jit, *reg)?;
}
if check_exception {
emit_load(jit, OperandSize::S64, RBP, R11, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32)?;
emit_cmp_imm32(jit, R11, 0, Some(0))?;
}
Ok(())
}
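/// Emits a call to `MemoryMapping::map()` that translates `vm_addr` into a
/// host address placed in `host_addr`, jumping to the exception handler if
/// the access is invalid.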
#[inline]
fn emit_address_translation<E: UserDefinedError>(jit: &mut JitCompiler, host_addr: u8, vm_addr: Value, len: u64, access_type: AccessType) -> Result<(), EbpfError<E>> {
emit_rust_call(jit, MemoryMapping::map::<UserError> as *const u8, &[
Argument { index: 3, value: vm_addr },
Argument { index: 0, value: Value::RegisterIndirect(RBP, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32) },
Argument { index: 1, value: Value::Register(R10) },
Argument { index: 2, value: Value::Constant64(access_type as i64) },
Argument { index: 4, value: Value::Constant64(len as i64) },
], None, true)?;
emit_load_imm(jit, R11, jit.pc as i64)?;
emit_jcc(jit, 0x85, TARGET_PC_EXCEPTION_AT)?;
emit_load(jit, OperandSize::S64, RBP, R11, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32)?;
emit_load(jit, OperandSize::S64, R11, host_addr, 8)
}
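/// Emits a variable shift (opcode extension `opc` of the 0xd3 group). x86
/// only shifts by CL, so operands are shuffled through R11 whenever `src` or
/// `dst` collides with RCX; 32 bit shifts first zero the upper half of `dst`.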
fn emit_shift<E: UserDefinedError>(jit: &mut JitCompiler, width: OperationWidth, opc: u8, src: u8, dst: u8) -> Result<(), EbpfError<E>> {
if width == OperationWidth::Bit32 {
emit_alu(jit, OperationWidth::Bit32, 0x81, 4, dst, -1, None)?;
}
if src == RCX {
if dst == RCX {
emit_alu(jit, width, 0xd3, opc, dst, 0, None)
} else {
emit_mov(jit, OperationWidth::Bit64, RCX, R11)?;
emit_alu(jit, width, 0xd3, opc, dst, 0, None)?;
emit_mov(jit, OperationWidth::Bit64, R11, RCX)
}
} else if dst == RCX {
emit_mov(jit, OperationWidth::Bit64, src, R11)?;
emit_xchg(jit, src, RCX)?;
emit_alu(jit, width, 0xd3, opc, src, 0, None)?;
emit_mov(jit, OperationWidth::Bit64, src, RCX)?;
emit_mov(jit, OperationWidth::Bit64, R11, src)
} else {
emit_mov(jit, OperationWidth::Bit64, RCX, R11)?;
emit_mov(jit, OperationWidth::Bit64, src, RCX)?;
emit_alu(jit, width, 0xd3, opc, dst, 0, None)?;
emit_mov(jit, OperationWidth::Bit64, R11, RCX)
}
}
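/// Emits multiplication, division, or modulo. Register divisors are checked
/// for zero first, and RAX/RDX are preserved around the operation because
/// the hardware mul/div instructions clobber them.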
fn emit_muldivmod<E: UserDefinedError>(jit: &mut JitCompiler, opc: u8, src: u8, dst: u8, imm: Option<i32>) -> Result<(), EbpfError<E>> {
let mul = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::MUL32_IMM & ebpf::BPF_ALU_OP_MASK);
let div = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::DIV32_IMM & ebpf::BPF_ALU_OP_MASK);
let modulo = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::MOD32_IMM & ebpf::BPF_ALU_OP_MASK); // renamed from `modrm` to avoid confusion with the x86 ModRM byte
let width = if (opc & ebpf::BPF_CLS_MASK) == ebpf::BPF_ALU64 { OperationWidth::Bit64 } else { OperationWidth::Bit32 };
if (div || modulo) && imm.is_none() {
emit_load_imm(jit, R11, jit.pc as i64)?;
emit_alu(jit, width, 0x85, src, src, 0, None)?;
emit_jcc(jit, 0x84, TARGET_PC_DIV_BY_ZERO)?;
}
if dst != RAX {
emit_push(jit, RAX)?;
}
if dst != RDX {
emit_push(jit, RDX)?;
}
if let Some(imm) = imm {
emit_load_imm(jit, R11, imm as i64)?;
} else {
emit_mov(jit, OperationWidth::Bit64, src, R11)?;
}
if dst != RAX {
emit_mov(jit, OperationWidth::Bit64, dst, RAX)?;
}
if div || modulo {
emit_alu(jit, width, 0x31, RDX, RDX, 0, None)?;
}
emit_alu(jit, width, 0xf7, if mul { 4 } else { 6 }, R11, 0, None)?;
if dst != RDX {
if modulo {
emit_mov(jit, OperationWidth::Bit64, RDX, dst)?;
}
emit_pop(jit, RDX)?;
}
if dst != RAX {
if div || mul {
emit_mov(jit, OperationWidth::Bit64, RAX, dst)?;
}
emit_pop(jit, RAX)?;
}
if width == OperationWidth::Bit32 && (opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL {
sign_extend_i32_to_i64(jit, dst, dst)?;
}
Ok(())
}
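/// Writes the discriminant of `err` into the result slot. Note that this
/// peeks at the in-memory layout of `Result<u64, EbpfError<E>>`, which the
/// compiler does not guarantee to be stable.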
#[inline]
fn emit_set_exception_kind<E: UserDefinedError>(jit: &mut JitCompiler, err: EbpfError<E>) -> Result<(), EbpfError<E>> {
let err = Result::<u64, EbpfError<E>>::Err(err);
let err_kind = unsafe { *(&err as *const _ as *const u64).offset(1) };
emit_load(jit, OperandSize::S64, RBP, R10, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32)?;
emit_store_imm32(jit, OperandSize::S64, R10, 8, err_kind as i32)
}
const PAGE_SIZE: usize = 4096;
fn round_to_page_size(value: usize) -> usize {
(value + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE
}
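/// A recorded fixup: `location` indexes the placeholder (a text section byte
/// offset or a pc_section entry) and `target_pc` names the destination,
/// which may be one of the handler anchors.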
#[derive(Debug)]
struct Jump {
location: usize,
target_pc: usize,
}
impl Jump {
fn get_target_offset(&self, jit: &JitCompiler) -> u64 {
match jit.handler_anchors.get(&self.target_pc) {
Some(target) => *target as u64,
None => jit.result.pc_section[self.target_pc]
}
}
}
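/// Streaming code generator: tracks the current output offset, the BPF
/// instruction index being translated, unresolved jumps, and the anchors of
/// the shared handler routines.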
struct JitCompiler {
result: JitProgramSections,
pc_section_jumps: Vec<Jump>,
text_section_jumps: Vec<Jump>,
offset_in_text_section: usize,
pc: usize,
program_vm_addr: u64,
handler_anchors: HashMap<usize, usize>,
config: Config,
}
impl Index<usize> for JitCompiler {
type Output = u8;
fn index(&self, _index: usize) -> &u8 {
&self.result.text_section[_index]
}
}
impl IndexMut<usize> for JitCompiler {
fn index_mut(&mut self, _index: usize) -> &mut u8 {
&mut self.result.text_section[_index]
}
}
impl std::fmt::Debug for JitCompiler {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), FormatterError> {
fmt.write_str("JIT text_section: [")?;
for i in self.result.text_section.iter() {
fmt.write_fmt(format_args!(" {:#04x},", i))?;
}
fmt.write_str(" ] | ")?;
fmt.debug_struct("JIT state")
.field("memory", &self.result.pc_section.as_ptr())
.field("pc", &self.pc)
.field("offset_in_text_section", &self.offset_in_text_section)
.field("pc_section", &self.result.pc_section)
.field("handler_anchors", &self.handler_anchors)
.field("pc_section_jumps", &self.pc_section_jumps)
.field("text_section_jumps", &self.text_section_jumps)
.finish()
}
}
impl JitCompiler {
fn new(_program: &[u8], _config: &Config) -> JitCompiler {
#[cfg(windows)]
{
panic!("JIT not supported on windows");
}
let mut pc = 0;
while pc * ebpf::INSN_SIZE < _program.len() {
let insn = ebpf::get_insn(_program, pc);
pc += match insn.opc {
ebpf::LD_DW_IMM => 2,
_ => 1,
};
}
JitCompiler {
result: JitProgramSections::new(pc + 1, pc * 256 + 512),
pc_section_jumps: vec![],
text_section_jumps: vec![],
offset_in_text_section: 0,
pc: 0,
program_vm_addr: 0,
handler_anchors: HashMap::new(),
config: *_config,
}
}
fn compile<E: UserDefinedError, I: InstructionMeter>(&mut self,
executable: &dyn Executable<E, I>) -> Result<(), EbpfError<E>> {
let (program_vm_addr, program) = executable.get_text_bytes()?;
self.program_vm_addr = program_vm_addr;
self.generate_prologue::<E, I>()?;
let entry = executable.get_entrypoint_instruction_offset().unwrap_or(0);
if entry != 0 {
emit_profile_instruction_count(self, Some(entry + 1))?;
emit_load_imm(self, R11, entry as i64)?;
emit_jmp(self, entry)?;
}
while self.pc * ebpf::INSN_SIZE < program.len() {
let insn = ebpf::get_insn(program, self.pc);
self.result.pc_section[self.pc] = self.offset_in_text_section as u64;
if self.config.enable_instruction_tracing {
emit_load_imm(self, R11, self.pc as i64)?;
emit_call(self, TARGET_PC_TRACE)?;
}
let dst = REGISTER_MAP[insn.dst as usize];
let src = REGISTER_MAP[insn.src as usize];
let target_pc = (self.pc as isize + insn.off as isize + 1) as usize;
match insn.opc {
ebpf::LD_ABS_B => {
emit_address_translation(self, R11, Value::Constant64(ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64), 1, AccessType::Load)?;
emit_load(self, OperandSize::S8, R11, RAX, 0)?;
},
ebpf::LD_ABS_H => {
emit_address_translation(self, R11, Value::Constant64(ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64), 2, AccessType::Load)?;
emit_load(self, OperandSize::S16, R11, RAX, 0)?;
},
ebpf::LD_ABS_W => {
emit_address_translation(self, R11, Value::Constant64(ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64), 4, AccessType::Load)?;
emit_load(self, OperandSize::S32, R11, RAX, 0)?;
},
ebpf::LD_ABS_DW => {
emit_address_translation(self, R11, Value::Constant64(ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64), 8, AccessType::Load)?;
emit_load(self, OperandSize::S64, R11, RAX, 0)?;
},
ebpf::LD_IND_B => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64), 1, AccessType::Load)?;
emit_load(self, OperandSize::S8, R11, RAX, 0)?;
},
ebpf::LD_IND_H => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64), 2, AccessType::Load)?;
emit_load(self, OperandSize::S16, R11, RAX, 0)?;
},
ebpf::LD_IND_W => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64), 4, AccessType::Load)?;
emit_load(self, OperandSize::S32, R11, RAX, 0)?;
},
ebpf::LD_IND_DW => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, ebpf::MM_INPUT_START.wrapping_add(insn.imm as u32 as u64) as i64), 8, AccessType::Load)?;
emit_load(self, OperandSize::S64, R11, RAX, 0)?;
},
ebpf::LD_DW_IMM => {
emit_validate_and_profile_instruction_count(self, true, Some(self.pc + 2))?;
self.pc += 1;
self.pc_section_jumps.push(Jump { location: self.pc, target_pc: TARGET_PC_CALL_UNSUPPORTED_INSTRUCTION });
let second_part = ebpf::get_insn(program, self.pc).imm as u64;
let imm = (insn.imm as u32) as u64 | second_part.wrapping_shl(32);
emit_load_imm(self, dst, imm as i64)?;
},
ebpf::LD_B_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, insn.off as i64), 1, AccessType::Load)?;
emit_load(self, OperandSize::S8, R11, dst, 0)?;
},
ebpf::LD_H_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, insn.off as i64), 2, AccessType::Load)?;
emit_load(self, OperandSize::S16, R11, dst, 0)?;
},
ebpf::LD_W_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, insn.off as i64), 4, AccessType::Load)?;
emit_load(self, OperandSize::S32, R11, dst, 0)?;
},
ebpf::LD_DW_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(src, insn.off as i64), 8, AccessType::Load)?;
emit_load(self, OperandSize::S64, R11, dst, 0)?;
},
ebpf::ST_B_IMM => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64), 1, AccessType::Store)?;
emit_store_imm32(self, OperandSize::S8, R11, 0, insn.imm)?;
},
ebpf::ST_H_IMM => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64), 2, AccessType::Store)?;
emit_store_imm32(self, OperandSize::S16, R11, 0, insn.imm)?;
},
ebpf::ST_W_IMM => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64), 4, AccessType::Store)?;
emit_store_imm32(self, OperandSize::S32, R11, 0, insn.imm)?;
},
ebpf::ST_DW_IMM => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64), 8, AccessType::Store)?;
emit_store_imm32(self, OperandSize::S64, R11, 0, insn.imm)?;
},
ebpf::ST_B_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64), 1, AccessType::Store)?;
emit_store(self, OperandSize::S8, src, R11, 0)?;
},
ebpf::ST_H_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64), 2, AccessType::Store)?;
emit_store(self, OperandSize::S16, src, R11, 0)?;
},
ebpf::ST_W_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64), 4, AccessType::Store)?;
emit_store(self, OperandSize::S32, src, R11, 0)?;
},
ebpf::ST_DW_REG => {
emit_address_translation(self, R11, Value::RegisterPlusConstant64(dst, insn.off as i64), 8, AccessType::Store)?;
emit_store(self, OperandSize::S64, src, R11, 0)?;
},
ebpf::ADD32_IMM => {
emit_alu(self, OperationWidth::Bit32, 0x81, 0, dst, insn.imm, None)?;
sign_extend_i32_to_i64(self, dst, dst)?;
},
ebpf::ADD32_REG => {
emit_alu(self, OperationWidth::Bit32, 0x01, src, dst, 0, None)?;
sign_extend_i32_to_i64(self, dst, dst)?;
},
ebpf::SUB32_IMM => {
emit_alu(self, OperationWidth::Bit32, 0x81, 5, dst, insn.imm, None)?;
sign_extend_i32_to_i64(self, dst, dst)?;
},
ebpf::SUB32_REG => {
emit_alu(self, OperationWidth::Bit32, 0x29, src, dst, 0, None)?;
sign_extend_i32_to_i64(self, dst, dst)?;
},
ebpf::MUL32_IMM | ebpf::DIV32_IMM | ebpf::MOD32_IMM =>
emit_muldivmod(self, insn.opc, dst, dst, Some(insn.imm))?,
ebpf::MUL32_REG | ebpf::DIV32_REG | ebpf::MOD32_REG =>
emit_muldivmod(self, insn.opc, src, dst, None)?,
ebpf::OR32_IMM => emit_alu(self, OperationWidth::Bit32, 0x81, 1, dst, insn.imm, None)?,
ebpf::OR32_REG => emit_alu(self, OperationWidth::Bit32, 0x09, src, dst, 0, None)?,
ebpf::AND32_IMM => emit_alu(self, OperationWidth::Bit32, 0x81, 4, dst, insn.imm, None)?,
ebpf::AND32_REG => emit_alu(self, OperationWidth::Bit32, 0x21, src, dst, 0, None)?,
ebpf::LSH32_IMM => emit_alu(self, OperationWidth::Bit32, 0xc1, 4, dst, insn.imm, None)?,
ebpf::LSH32_REG => emit_shift(self, OperationWidth::Bit32, 4, src, dst)?,
ebpf::RSH32_IMM => emit_alu(self, OperationWidth::Bit32, 0xc1, 5, dst, insn.imm, None)?,
ebpf::RSH32_REG => emit_shift(self, OperationWidth::Bit32, 5, src, dst)?,
ebpf::NEG32 => emit_alu(self, OperationWidth::Bit32, 0xf7, 3, dst, 0, None)?,
ebpf::XOR32_IMM => emit_alu(self, OperationWidth::Bit32, 0x81, 6, dst, insn.imm, None)?,
ebpf::XOR32_REG => emit_alu(self, OperationWidth::Bit32, 0x31, src, dst, 0, None)?,
ebpf::MOV32_IMM => emit_alu(self, OperationWidth::Bit32, 0xc7, 0, dst, insn.imm, None)?,
ebpf::MOV32_REG => emit_mov(self, OperationWidth::Bit32, src, dst)?,
ebpf::ARSH32_IMM => emit_alu(self, OperationWidth::Bit32, 0xc1, 7, dst, insn.imm, None)?,
ebpf::ARSH32_REG => emit_shift(self, OperationWidth::Bit32, 7, src, dst)?,
ebpf::LE => {
match insn.imm {
16 => {
emit_alu(self, OperationWidth::Bit32, 0x81, 4, dst, 0xffff, None)?;
}
32 => {
emit_alu(self, OperationWidth::Bit32, 0x81, 4, dst, -1, None)?;
}
64 => {}
_ => {
return Err(EbpfError::InvalidInstruction(self.pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
}
},
ebpf::BE => {
match insn.imm {
16 => {
emit::<u8, E>(self, 0x66)?;
emit_alu(self, OperationWidth::Bit32, 0xc1, 0, dst, 8, None)?;
emit_alu(self, OperationWidth::Bit32, 0x81, 4, dst, 0xffff, None)?;
}
32 | 64 => {
let bit = match insn.imm { 64 => 1, _ => 0 };
emit_basic_rex(self, bit, 0, dst)?;
emit::<u8, E>(self, 0x0f)?;
emit::<u8, E>(self, 0xc8 | (dst & 0b111))?;
}
_ => {
return Err(EbpfError::InvalidInstruction(self.pc + ebpf::ELF_INSN_DUMP_OFFSET));
}
}
},
ebpf::ADD64_IMM => emit_alu(self, OperationWidth::Bit64, 0x81, 0, dst, insn.imm, None)?,
ebpf::ADD64_REG => emit_alu(self, OperationWidth::Bit64, 0x01, src, dst, 0, None)?,
ebpf::SUB64_IMM => emit_alu(self, OperationWidth::Bit64, 0x81, 5, dst, insn.imm, None)?,
ebpf::SUB64_REG => emit_alu(self, OperationWidth::Bit64, 0x29, src, dst, 0, None)?,
ebpf::MUL64_IMM | ebpf::DIV64_IMM | ebpf::MOD64_IMM =>
emit_muldivmod(self, insn.opc, dst, dst, Some(insn.imm))?,
ebpf::MUL64_REG | ebpf::DIV64_REG | ebpf::MOD64_REG =>
emit_muldivmod(self, insn.opc, src, dst, None)?,
ebpf::OR64_IMM => emit_alu(self, OperationWidth::Bit64, 0x81, 1, dst, insn.imm, None)?,
ebpf::OR64_REG => emit_alu(self, OperationWidth::Bit64, 0x09, src, dst, 0, None)?,
ebpf::AND64_IMM => emit_alu(self, OperationWidth::Bit64, 0x81, 4, dst, insn.imm, None)?,
ebpf::AND64_REG => emit_alu(self, OperationWidth::Bit64, 0x21, src, dst, 0, None)?,
ebpf::LSH64_IMM => emit_alu(self, OperationWidth::Bit64, 0xc1, 4, dst, insn.imm, None)?,
ebpf::LSH64_REG => emit_shift(self, OperationWidth::Bit64, 4, src, dst)?,
ebpf::RSH64_IMM => emit_alu(self, OperationWidth::Bit64, 0xc1, 5, dst, insn.imm, None)?,
ebpf::RSH64_REG => emit_shift(self, OperationWidth::Bit64, 5, src, dst)?,
ebpf::NEG64 => emit_alu(self, OperationWidth::Bit64, 0xf7, 3, dst, 0, None)?,
ebpf::XOR64_IMM => emit_alu(self, OperationWidth::Bit64, 0x81, 6, dst, insn.imm, None)?,
ebpf::XOR64_REG => emit_alu(self, OperationWidth::Bit64, 0x31, src, dst, 0, None)?,
ebpf::MOV64_IMM => emit_load_imm(self, dst, insn.imm as i64)?,
ebpf::MOV64_REG => emit_mov(self, OperationWidth::Bit64, src, dst)?,
ebpf::ARSH64_IMM => emit_alu(self, OperationWidth::Bit64, 0xc1, 7, dst, insn.imm, None)?,
ebpf::ARSH64_REG => emit_shift(self, OperationWidth::Bit64, 7, src, dst)?,
ebpf::JA => {
emit_validate_and_profile_instruction_count(self, false, Some(target_pc))?;
emit_jmp(self, target_pc)?;
},
ebpf::JEQ_IMM => emit_conditional_branch_imm(self, 0x84, insn.imm, dst, target_pc)?,
ebpf::JEQ_REG => emit_conditional_branch_reg(self, 0x84, src, dst, target_pc)?,
ebpf::JGT_IMM => emit_conditional_branch_imm(self, 0x87, insn.imm, dst, target_pc)?,
ebpf::JGT_REG => emit_conditional_branch_reg(self, 0x87, src, dst, target_pc)?,
ebpf::JGE_IMM => emit_conditional_branch_imm(self, 0x83, insn.imm, dst, target_pc)?,
ebpf::JGE_REG => emit_conditional_branch_reg(self, 0x83, src, dst, target_pc)?,
ebpf::JLT_IMM => emit_conditional_branch_imm(self, 0x82, insn.imm, dst, target_pc)?,
ebpf::JLT_REG => emit_conditional_branch_reg(self, 0x82, src, dst, target_pc)?,
ebpf::JLE_IMM => emit_conditional_branch_imm(self, 0x86, insn.imm, dst, target_pc)?,
ebpf::JLE_REG => emit_conditional_branch_reg(self, 0x86, src, dst, target_pc)?,
ebpf::JSET_IMM => {
emit_validate_and_profile_instruction_count(self, false, Some(target_pc))?;
emit_alu(self, OperationWidth::Bit64, 0xf7, 0, dst, insn.imm, None)?;
emit_jcc(self, 0x85, target_pc)?;
emit_undo_profile_instruction_count(self, target_pc)?;
},
ebpf::JSET_REG => {
emit_validate_and_profile_instruction_count(self, false, Some(target_pc))?;
emit_alu(self, OperationWidth::Bit64, 0x85, src, dst, 0, None)?;
emit_jcc(self, 0x85, target_pc)?;
emit_undo_profile_instruction_count(self, target_pc)?;
},
ebpf::JNE_IMM => emit_conditional_branch_imm(self, 0x85, insn.imm, dst, target_pc)?,
ebpf::JNE_REG => emit_conditional_branch_reg(self, 0x85, src, dst, target_pc)?,
ebpf::JSGT_IMM => emit_conditional_branch_imm(self, 0x8f, insn.imm, dst, target_pc)?,
ebpf::JSGT_REG => emit_conditional_branch_reg(self, 0x8f, src, dst, target_pc)?,
ebpf::JSGE_IMM => emit_conditional_branch_imm(self, 0x8d, insn.imm, dst, target_pc)?,
ebpf::JSGE_REG => emit_conditional_branch_reg(self, 0x8d, src, dst, target_pc)?,
ebpf::JSLT_IMM => emit_conditional_branch_imm(self, 0x8c, insn.imm, dst, target_pc)?,
ebpf::JSLT_REG => emit_conditional_branch_reg(self, 0x8c, src, dst, target_pc)?,
ebpf::JSLE_IMM => emit_conditional_branch_imm(self, 0x8e, insn.imm, dst, target_pc)?,
ebpf::JSLE_REG => emit_conditional_branch_reg(self, 0x8e, src, dst, target_pc)?,
ebpf::CALL_IMM => {
if let Some(syscall) = executable.get_syscall_registry().lookup_syscall(insn.imm as u32) {
if self.config.enable_instruction_meter {
emit_validate_and_profile_instruction_count(self, true, Some(0))?;
emit_load(self, OperandSize::S64, RBP, R11, -8 * (CALLEE_SAVED_REGISTERS.len() + 2) as i32)?;
emit_alu(self, OperationWidth::Bit64, 0x29, ARGUMENT_REGISTERS[0], R11, 0, None)?;
emit_mov(self, OperationWidth::Bit64, R11, ARGUMENT_REGISTERS[0])?;
emit_load(self, OperandSize::S64, RBP, R11, -8 * (CALLEE_SAVED_REGISTERS.len() + 3) as i32)?;
emit_rust_call(self, I::consume as *const u8, &[
Argument { index: 1, value: Value::Register(ARGUMENT_REGISTERS[0]) },
Argument { index: 0, value: Value::Register(R11) },
], None, false)?;
}
emit_load(self, OperandSize::S64, R10, RAX, (SYSCALL_CONTEXT_OBJECTS_OFFSET + syscall.context_object_slot) as i32 * 8)?;
emit_rust_call(self, syscall.function as *const u8, &[
Argument { index: 0, value: Value::Register(RAX) },
Argument { index: 1, value: Value::Register(ARGUMENT_REGISTERS[1]) },
Argument { index: 2, value: Value::Register(ARGUMENT_REGISTERS[2]) },
Argument { index: 3, value: Value::Register(ARGUMENT_REGISTERS[3]) },
Argument { index: 4, value: Value::Register(ARGUMENT_REGISTERS[4]) },
Argument { index: 5, value: Value::Register(ARGUMENT_REGISTERS[5]) },
Argument { index: 6, value: Value::Register(R10) },
Argument { index: 7, value: Value::RegisterIndirect(RBP, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32) },
], None, true)?;
emit_load_imm(self, R11, self.pc as i64)?;
emit_jcc(self, 0x85, TARGET_PC_SYSCALL_EXCEPTION)?;
emit_load(self, OperandSize::S64, RBP, R11, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32)?;
emit_load(self, OperandSize::S64, R11, REGISTER_MAP[0], 8)?;
if self.config.enable_instruction_meter {
emit_load(self, OperandSize::S64, RBP, R11, -8 * (CALLEE_SAVED_REGISTERS.len() + 3) as i32)?;
emit_rust_call(self, I::get_remaining as *const u8, &[
Argument { index: 0, value: Value::Register(R11) },
], Some(ARGUMENT_REGISTERS[0]), false)?;
emit_store(self, OperandSize::S64, ARGUMENT_REGISTERS[0], RBP, -8 * (CALLEE_SAVED_REGISTERS.len() + 2) as i32)?;
emit_undo_profile_instruction_count(self, 0)?;
}
} else {
match executable.lookup_bpf_function(insn.imm as u32) {
Some(target_pc) => {
emit_bpf_call(self, Value::Constant64(*target_pc as i64), self.result.pc_section.len() - 1)?;
},
None => {
let fat_ptr: DynTraitFatPointer = unsafe { std::mem::transmute(executable) };
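// Vtable slot 10 is assumed to be Executable::report_unresolved_symbol();
// this depends on the trait object layout and is inherently fragile.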
emit_rust_call(self, fat_ptr.vtable.methods[10], &[
Argument { index: 0, value: Value::RegisterIndirect(RBP, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32) },
Argument { index: 1, value: Value::Constant64(fat_ptr.data as i64) },
Argument { index: 2, value: Value::Constant64(self.pc as i64) },
], None, true)?;
emit_load_imm(self, R11, self.pc as i64)?;
emit_jmp(self, TARGET_PC_SYSCALL_EXCEPTION)?;
},
}
}
},
ebpf::CALL_REG => {
emit_bpf_call(self, Value::Register(REGISTER_MAP[insn.imm as usize]), self.result.pc_section.len() - 1)?;
},
ebpf::EXIT => {
emit_validate_and_profile_instruction_count(self, true, Some(0))?;
emit_load(self, OperandSize::S64, RBP, REGISTER_MAP[STACK_REG], -8 * CALLEE_SAVED_REGISTERS.len() as i32)?;
emit_alu(self, OperationWidth::Bit64, 0x81, 4, REGISTER_MAP[STACK_REG], !(self.config.stack_frame_size as i32 * 2 - 1), None)?;
emit_alu(self, OperationWidth::Bit64, 0x81, 5, REGISTER_MAP[STACK_REG], self.config.stack_frame_size as i32 * 2, None)?;
emit_store(self, OperandSize::S64, REGISTER_MAP[STACK_REG], RBP, -8 * CALLEE_SAVED_REGISTERS.len() as i32)?;
emit_mov(self, OperationWidth::Bit64, REGISTER_MAP[0], R11)?;
emit_load_imm(self, REGISTER_MAP[0], MM_STACK_START as i64)?;
emit_cmp(self, REGISTER_MAP[0], REGISTER_MAP[STACK_REG], None)?;
emit_mov(self, OperationWidth::Bit64, R11, REGISTER_MAP[0])?;
emit_jcc(self, 0x82, TARGET_PC_EXIT)?;
emit::<u8, E>(self, 0xc3)?;
},
_ => return Err(EbpfError::UnsupportedInstruction(self.pc + ebpf::ELF_INSN_DUMP_OFFSET)),
}
self.pc += 1;
}
self.result.pc_section[self.pc] = self.offset_in_text_section as u64;
emit_validate_and_profile_instruction_count(self, true, Some(self.pc + 2))?;
emit_load_imm(self, R11, self.pc as i64)?;
emit_set_exception_kind::<E>(self, EbpfError::ExecutionOverrun(0))?;
emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
self.generate_helper_routines::<E>()?;
self.generate_exception_handlers::<E>()?;
self.generate_epilogue::<E>()?;
self.resolve_jumps();
self.result.seal();
Ok(())
}
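/// Emits the shared subroutines: the optional tracing hook and
/// TRANSLATE_PC, which linearly scans `pc_section` to map a host return
/// address in R11 back to a BPF instruction index.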
fn generate_helper_routines<E: UserDefinedError>(&mut self) -> Result<(), EbpfError<E>> {
if self.config.enable_instruction_tracing {
set_anchor(self, TARGET_PC_TRACE);
emit_push(self, R11)?;
for reg in REGISTER_MAP.iter().rev() {
emit_push(self, *reg)?;
}
emit_mov(self, OperationWidth::Bit64, RSP, REGISTER_MAP[0])?;
emit_alu(self, OperationWidth::Bit64, 0x81, 0, RSP, -8 * 3, None)?;
emit_rust_call(self, Tracer::trace as *const u8, &[
Argument { index: 0, value: Value::RegisterIndirect(R10, std::mem::size_of::<MemoryMapping>() as i32) },
Argument { index: 1, value: Value::Register(REGISTER_MAP[0]) },
], None, false)?;
emit_alu(self, OperationWidth::Bit64, 0x81, 0, RSP, 8 * 3, None)?;
emit_pop(self, REGISTER_MAP[0])?;
emit_alu(self, OperationWidth::Bit64, 0x81, 0, RSP, 8 * (REGISTER_MAP.len() - 1) as i32, None)?;
emit_pop(self, R11)?;
emit::<u8, E>(self, 0xc3)?;
}
set_anchor(self, TARGET_PC_TRANSLATE_PC);
emit_push(self, REGISTER_MAP[0])?;
emit_load_imm(self, REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64 - 8)?;
set_anchor(self, TARGET_PC_TRANSLATE_PC_LOOP);
emit_alu(self, OperationWidth::Bit64, 0x81, 0, REGISTER_MAP[0], 8, None)?;
emit_cmp(self, R11, REGISTER_MAP[0], Some(0))?;
emit_jcc(self, 0x82, TARGET_PC_TRANSLATE_PC_LOOP)?;
emit_mov(self, OperationWidth::Bit64, REGISTER_MAP[0], R11)?;
emit_load_imm(self, REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64)?;
emit_alu(self, OperationWidth::Bit64, 0x29, REGISTER_MAP[0], R11, 0, None)?;
emit_alu(self, OperationWidth::Bit64, 0xc1, 5, R11, 3, None)?;
emit_pop(self, REGISTER_MAP[0])?;
emit::<u8, E>(self, 0xc3)
}
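/// Emits one handler per exception kind. They converge on
/// TARGET_PC_EXCEPTION_AT, which finalizes the instruction count, records
/// the faulting instruction in the result slot, and jumps to the epilogue.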
fn generate_exception_handlers<E: UserDefinedError>(&mut self) -> Result<(), EbpfError<E>> {
set_anchor(self, TARGET_PC_CALL_EXCEEDED_MAX_INSTRUCTIONS);
emit_mov(self, OperationWidth::Bit64, ARGUMENT_REGISTERS[0], R11)?;
emit_set_exception_kind::<E>(self, EbpfError::ExceededMaxInstructions(0, 0))?;
emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
set_anchor(self, TARGET_PC_CALL_DEPTH_EXCEEDED);
emit_set_exception_kind::<E>(self, EbpfError::CallDepthExceeded(0, 0))?;
emit_store_imm32(self, OperandSize::S64, R10, 24, self.config.max_call_depth as i32)?;
emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
set_anchor(self, TARGET_PC_CALL_OUTSIDE_TEXT_SEGMENT);
emit_set_exception_kind::<E>(self, EbpfError::CallOutsideTextSegment(0, 0))?;
emit_store(self, OperandSize::S64, REGISTER_MAP[0], R10, 24)?;
emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
set_anchor(self, TARGET_PC_DIV_BY_ZERO);
emit_set_exception_kind::<E>(self, EbpfError::DivideByZero(0))?;
emit_jmp(self, TARGET_PC_EXCEPTION_AT)?;
set_anchor(self, TARGET_PC_CALLX_UNSUPPORTED_INSTRUCTION);
emit_call(self, TARGET_PC_TRANSLATE_PC)?;
set_anchor(self, TARGET_PC_CALL_UNSUPPORTED_INSTRUCTION);
if self.config.enable_instruction_tracing {
emit_call(self, TARGET_PC_TRACE)?;
}
emit_set_exception_kind::<E>(self, EbpfError::UnsupportedInstruction(0))?;
set_anchor(self, TARGET_PC_EXCEPTION_AT);
emit_profile_instruction_count_of_exception(self)?;
emit_load(self, OperandSize::S64, RBP, R10, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32)?;
emit_store_imm32(self, OperandSize::S64, R10, 0, 1)?;
emit_alu(self, OperationWidth::Bit64, 0x81, 0, R11, ebpf::ELF_INSN_DUMP_OFFSET as i32 - 1, None)?;
emit_store(self, OperandSize::S64, R11, R10, 16)?;
emit_jmp(self, TARGET_PC_EPILOGUE)?;
set_anchor(self, TARGET_PC_SYSCALL_EXCEPTION);
emit_profile_instruction_count_of_exception(self)?;
emit_jmp(self, TARGET_PC_EPILOGUE)
}
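/// Emits the function prologue: saves the callee-saved registers, stashes
/// the JitProgramArgument pointer in R10, seeds the instruction meter from
/// `I::get_remaining()`, and zeroes the mapped BPF registers (except r1,
/// which keeps the second native argument).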
fn generate_prologue<E: UserDefinedError, I: InstructionMeter>(&mut self) -> Result<(), EbpfError<E>> {
for reg in CALLEE_SAVED_REGISTERS.iter() {
emit_push(self, *reg)?;
if *reg == RBP {
emit_mov(self, OperationWidth::Bit64, RSP, RBP)?;
}
}
emit_mov(self, OperationWidth::Bit64, ARGUMENT_REGISTERS[2], R10)?;
emit_load_imm(self, REGISTER_MAP[STACK_REG], MM_STACK_START as i64 + self.config.stack_frame_size as i64)?;
emit_push(self, REGISTER_MAP[STACK_REG])?;
emit_push(self, ARGUMENT_REGISTERS[0])?;
emit_rust_call(self, I::get_remaining as *const u8, &[
Argument { index: 0, value: Value::Register(ARGUMENT_REGISTERS[3]) },
], Some(ARGUMENT_REGISTERS[0]), false)?;
emit_push(self, ARGUMENT_REGISTERS[0])?;
emit_push(self, ARGUMENT_REGISTERS[3])?;
for reg in REGISTER_MAP.iter() {
if *reg != REGISTER_MAP[1] && *reg != REGISTER_MAP[STACK_REG] {
emit_load_imm(self, *reg, 0)?;
}
}
Ok(())
}
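/// Emits the exit path: TARGET_PC_EXIT writes r0 into the result slot and
/// marks it as Ok, then the common epilogue returns the instruction counter
/// in RAX and restores the host stack and callee-saved registers.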
fn generate_epilogue<E: UserDefinedError>(&mut self) -> Result<(), EbpfError<E>> {
set_anchor(self, TARGET_PC_EXIT);
emit_load(self, OperandSize::S64, RBP, R10, -8 * (CALLEE_SAVED_REGISTERS.len() + 1) as i32)?;
emit_store(self, OperandSize::S64, REGISTER_MAP[0], R10, 8)?;
emit_load_imm(self, REGISTER_MAP[0], 0)?;
emit_store(self, OperandSize::S64, REGISTER_MAP[0], R10, 0)?;
set_anchor(self, TARGET_PC_EPILOGUE);
emit_mov(self, OperationWidth::Bit64, ARGUMENT_REGISTERS[0], RAX)?;
emit_mov(self, OperationWidth::Bit64, RBP, R11)?;
emit_alu(self, OperationWidth::Bit64, 0x81, 5, R11, 8 * (CALLEE_SAVED_REGISTERS.len()-1) as i32, None)?;
emit_mov(self, OperationWidth::Bit64, R11, RSP)?;
for reg in CALLEE_SAVED_REGISTERS.iter().rev() {
emit_pop(self, *reg)?;
}
emit::<u8, E>(self, 0xc3)
}
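/// Back-patches all recorded jump placeholders, redirects pc_section entries
/// that point at the unsupported-call handler to the callx variant (such
/// entries are only ever reached via register-indirect calls), and finally
/// turns the stored text section offsets into absolute host addresses.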
fn resolve_jumps(&mut self) {
for jump in &self.pc_section_jumps {
self.result.pc_section[jump.location] = jump.get_target_offset(self);
}
for jump in &self.text_section_jumps {
let offset_value = jump.get_target_offset(self) as i32
- jump.location as i32
- std::mem::size_of::<i32>() as i32;
unsafe {
libc::memcpy(
self.result.text_section.as_ptr().add(jump.location) as *mut libc::c_void,
&offset_value as *const i32 as *const libc::c_void,
std::mem::size_of::<i32>(),
);
}
}
let call_unsupported_instruction = self.handler_anchors.get(&TARGET_PC_CALL_UNSUPPORTED_INSTRUCTION).unwrap();
let callx_unsupported_instruction = self.handler_anchors.get(&TARGET_PC_CALLX_UNSUPPORTED_INSTRUCTION).unwrap();
for offset in self.result.pc_section.iter_mut() {
if *offset == *call_unsupported_instruction as u64 {
*offset = *callx_unsupported_instruction as u64;
}
*offset = unsafe { self.result.text_section.as_ptr().add(*offset as usize) } as u64;
}
}
}