};
typedef int (*uprobe_write_verify_t)(struct page *page, unsigned long vaddr,
- uprobe_opcode_t *opcode);
+ uprobe_opcode_t *insn, int nbytes);
extern void __init uprobes_init(void);
extern int set_swbp(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_write(struct arch_uprobe *auprobe, struct vm_area_struct *vma, const unsigned long opcode_vaddr,
- uprobe_opcode_t opcode, uprobe_write_verify_t verify);
+ uprobe_opcode_t *insn, int nbytes, uprobe_write_verify_t verify);
extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
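Two things fall out of the widened uprobe_write_verify_t: a verify callback now sees the full instruction buffer plus its length, and the return convention stays the same as verify_opcode()'s (negative on error, 0 to skip the write because the bytes are already in the desired state, positive to proceed). A minimal sketch of a multi-byte verifier, assuming a hypothetical 5-byte instruction and a byte-sized uprobe_opcode_t as on x86; verify_insn5 is illustrative, not part of this patch:

static int verify_insn5(struct page *page, unsigned long vaddr,
			uprobe_opcode_t *insn, int nbytes)
{
	uprobe_opcode_t old[5];

	/* This verifier only understands 5-byte writes. */
	if (WARN_ON_ONCE(nbytes != sizeof(old)))
		return -EINVAL;

	uprobe_copy_from_page(page, vaddr, old, sizeof(old));

	/* Already in the desired state: tell uprobe_write() to skip. */
	if (!memcmp(old, insn, sizeof(old)))
		return 0;

	return 1;
}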
kunmap_atomic(kaddr);
}
-static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
+static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *insn,
+ int nbytes)
{
uprobe_opcode_t old_opcode;
bool is_swbp;
uprobe_copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
is_swbp = is_swbp_insn(&old_opcode);
- if (is_swbp_insn(new_opcode)) {
+ if (is_swbp_insn(insn)) {
if (is_swbp) /* register: already installed? */
return 0;
} else {
static int __uprobe_write(struct vm_area_struct *vma,
struct folio_walk *fw, struct folio *folio,
- unsigned long opcode_vaddr, uprobe_opcode_t opcode)
+ unsigned long insn_vaddr, uprobe_opcode_t *insn, int nbytes)
{
- const unsigned long vaddr = opcode_vaddr & PAGE_MASK;
- const bool is_register = !!is_swbp_insn(&opcode);
+ const unsigned long vaddr = insn_vaddr & PAGE_MASK;
+ const bool is_register = !!is_swbp_insn(insn);
bool pmd_mappable;
/* For now, we'll only handle PTE-mapped folios. */
/*
 * See can_follow_write_pte(): we'd actually prefer a writable PTE here,
 * but the VMA might not be writable.
 */
flush_cache_page(vma, vaddr, pte_pfn(fw->pte));
fw->pte = ptep_clear_flush(vma, vaddr, fw->ptep);
- copy_to_page(fw->page, opcode_vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+ copy_to_page(fw->page, insn_vaddr, insn, nbytes);
/*
* When unregistering, we may only zap a PTE if uffd is disabled and
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
const unsigned long opcode_vaddr, uprobe_opcode_t opcode)
{
- return uprobe_write(auprobe, vma, opcode_vaddr, opcode, verify_opcode);
+ return uprobe_write(auprobe, vma, opcode_vaddr, &opcode, UPROBE_SWBP_INSN_SIZE, verify_opcode);
}
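The wrapper keeps every existing uprobe_write_opcode() caller source-compatible: the single opcode is passed by address with UPROBE_SWBP_INSN_SIZE as the length. An arch that wants to patch in a longer instruction would call uprobe_write() directly; a hypothetical helper pairing with the verify_insn5() sketch above (write_insn5 is illustrative, not part of this patch):

static int write_insn5(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
		       unsigned long vaddr, uprobe_opcode_t *insn5)
{
	return uprobe_write(auprobe, vma, vaddr, insn5, 5, verify_insn5);
}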
int uprobe_write(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
- const unsigned long opcode_vaddr, uprobe_opcode_t opcode, uprobe_write_verify_t verify)
+ const unsigned long insn_vaddr, uprobe_opcode_t *insn, int nbytes,
+ uprobe_write_verify_t verify)
{
- const unsigned long vaddr = opcode_vaddr & PAGE_MASK;
+ const unsigned long vaddr = insn_vaddr & PAGE_MASK;
struct mm_struct *mm = vma->vm_mm;
struct uprobe *uprobe;
int ret, is_register, ref_ctr_updated = 0;
struct folio *folio;
struct page *page;
- is_register = is_swbp_insn(&opcode);
+ is_register = is_swbp_insn(insn);
uprobe = container_of(auprobe, struct uprobe, arch);
if (WARN_ON_ONCE(!is_cow_mapping(vma->vm_flags)))
goto out;
folio = page_folio(page);
- ret = verify(page, opcode_vaddr, &opcode);
+ ret = verify(page, insn_vaddr, insn, nbytes);
if (ret <= 0) {
folio_put(folio);
goto out;
/* Walk the page tables again, to perform the actual update. */
if (folio_walk_start(&fw, vma, vaddr, 0)) {
if (fw.page == page)
- ret = __uprobe_write(vma, &fw, folio, opcode_vaddr, opcode);
+ ret = __uprobe_write(vma, &fw, folio, insn_vaddr, insn, nbytes);
folio_walk_end(&fw, vma);
}
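One constraint carries over from the single-opcode days: __uprobe_write() patches one mapped page, and copy_to_page() writes exactly nbytes into it, so a write must not straddle a page boundary. A caller-side guard amounts to the check below (a sketch; uprobe_write_fits_page is illustrative, mirroring the BUG_ON the existing code applies to the UPROBE_SWBP_INSN_SIZE case):

/* Would an nbytes-long write at insn_vaddr stay within one page? */
static bool uprobe_write_fits_page(unsigned long insn_vaddr, int nbytes)
{
	return (insn_vaddr & ~PAGE_MASK) + nbytes <= PAGE_SIZE;
}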