@@ -547,14 +547,14 @@ uc_err uc_reg_write(uc_engine *uc, int regid, const void *value)
 
 // check if a memory area is mapped
 // this is complicated because an area can overlap adjacent blocks
-static bool check_mem_area(uc_engine *uc, uint64_t address, size_t size)
+static bool check_mem_area(uc_engine *uc, uint64_t address, uint64_t size)
 {
-    size_t count = 0, len;
+    uint64_t count = 0, len;
 
     while (count < size) {
         MemoryRegion *mr = memory_mapping(uc, address);
         if (mr) {
-            len = (size_t)MIN(size - count, mr->end - address);
+            len = (uint64_t)MIN(size - count, mr->end - address);
             count += len;
             address += len;
         } else { // this address is not mapped in yet
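The overlap the comment above warns about is exactly the case of one range spanning two adjacent mappings. A minimal usage sketch of that situation against the public API (addresses and the architecture choice are illustrative, not from this patch); protecting a range that straddles the boundary makes check_mem_area() walk both MemoryRegions before the call proceeds:

#include <unicorn/unicorn.h>

int main(void)
{
    uc_engine *uc;
    if (uc_open(UC_ARCH_X86, UC_MODE_64, &uc) != UC_ERR_OK)
        return 1;

    uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL); // first page
    uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL); // second, adjacent page

    // [0x1000, 0x3000) spans both mappings
    uc_err err = uc_mem_protect(uc, 0x1000, 0x2000, UC_PROT_READ);

    uc_close(uc);
    return err == UC_ERR_OK ? 0 : 1;
}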
@@ -566,17 +566,13 @@ static bool check_mem_area(uc_engine *uc, uint64_t address, size_t size)
 }
 
 UNICORN_EXPORT
-uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size)
+uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, uint64_t size)
 {
-    size_t count = 0, len;
+    uint64_t count = 0, len;
     uint8_t *bytes = _bytes;
 
     UC_INIT(uc);
 
-    // qemu cpu_physical_memory_rw() size is an int
-    if (size > INT_MAX)
-        return UC_ERR_ARG;
-
     if (uc->mem_redirect) {
         address = uc->mem_redirect(address);
     }
@@ -589,7 +585,7 @@ uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size)
     while (count < size) {
         MemoryRegion *mr = memory_mapping(uc, address);
         if (mr) {
-            len = (size_t)MIN(size - count, mr->end - address);
+            len = (uint64_t)MIN(size - count, mr->end - address);
             if (uc->read_mem(&uc->address_space_memory, address, bytes, len) ==
                 false) {
                 break;
@@ -611,17 +607,13 @@ uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size)
 
 UNICORN_EXPORT
 uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes,
-                    size_t size)
+                    uint64_t size)
 {
-    size_t count = 0, len;
+    uint64_t count = 0, len;
     const uint8_t *bytes = _bytes;
 
     UC_INIT(uc);
 
-    // qemu cpu_physical_memory_rw() size is an int
-    if (size > INT_MAX)
-        return UC_ERR_ARG;
-
     if (uc->mem_redirect) {
         address = uc->mem_redirect(address);
     }
@@ -641,7 +633,7 @@ uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes,
                 uc->readonly_mem(mr, false);
             }
 
-            len = (size_t)MIN(size - count, mr->end - address);
+            len = (uint64_t)MIN(size - count, mr->end - address);
             if (uc->write_mem(&uc->address_space_memory, address, bytes, len) ==
                 false) {
                 break;
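Dropping the INT_MAX guard is what actually requires the wider type: uc_mem_read()/uc_mem_write() no longer reject transfers above 2 GiB and instead rely on the while loop above to hand the data to read_mem/write_mem one MemoryRegion at a time. A hedged sketch of a call the old code refused with UC_ERR_ARG; it assumes the host can afford a 4 GiB buffer and that the underlying memory helpers now accept 64-bit lengths, which is what this patch implies:

#include <unicorn/unicorn.h>
#include <stdlib.h>

int main(void)
{
    uc_engine *uc;
    uint64_t big = 4ULL * 1024 * 1024 * 1024; // 4 GiB, > INT_MAX
    uint8_t *buf = malloc(big);

    if (buf == NULL || uc_open(UC_ARCH_X86, UC_MODE_64, &uc) != UC_ERR_OK)
        return 1;

    uc_mem_map(uc, 0x100000000ULL, big, UC_PROT_ALL);

    // previously rejected because size > INT_MAX; now chunked per region
    uc_err err = uc_mem_write(uc, 0x100000000ULL, buf, big);

    free(buf);
    uc_close(uc);
    return err == UC_ERR_OK ? 0 : 1;
}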
@@ -956,7 +948,7 @@ static int bsearch_mapped_blocks(const uc_engine *uc, uint64_t address)
 }
 
 // find if a memory range overlaps with existing mapped regions
-static bool memory_overlap(struct uc_struct *uc, uint64_t begin, size_t size)
+static bool memory_overlap(struct uc_struct *uc, uint64_t begin, uint64_t size)
 {
     unsigned int i;
     uint64_t end = begin + size - 1;
@@ -976,7 +968,7 @@ static bool memory_overlap(struct uc_struct *uc, uint64_t begin, size_t size)
 }
 
 // common setup/error checking shared between uc_mem_map and uc_mem_map_ptr
-static uc_err mem_map(uc_engine *uc, uint64_t address, size_t size,
+static uc_err mem_map(uc_engine *uc, uint64_t address, uint64_t size,
                       uint32_t perms, MemoryRegion *block)
 {
     MemoryRegion **regions;
@@ -1008,7 +1000,7 @@ static uc_err mem_map(uc_engine *uc, uint64_t address, size_t size,
     return UC_ERR_OK;
 }
 
-static uc_err mem_map_check(uc_engine *uc, uint64_t address, size_t size,
+static uc_err mem_map_check(uc_engine *uc, uint64_t address, uint64_t size,
                             uint32_t perms)
 {
     if (size == 0) {
@@ -1045,7 +1037,7 @@ static uc_err mem_map_check(uc_engine *uc, uint64_t address, size_t size,
 }
 
 UNICORN_EXPORT
-uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms)
+uc_err uc_mem_map(uc_engine *uc, uint64_t address, uint64_t size, uint32_t perms)
 {
     uc_err res;
@@ -1065,7 +1057,7 @@ uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms)
 }
 
 UNICORN_EXPORT
-uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size,
+uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, uint64_t size,
                       uint32_t perms, void *ptr)
 {
     uc_err res;
@@ -1090,7 +1082,7 @@ uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size,
 }
 
 UNICORN_EXPORT
-uc_err uc_mmio_map(uc_engine *uc, uint64_t address, size_t size,
+uc_err uc_mmio_map(uc_engine *uc, uint64_t address, uint64_t size,
                    uc_cb_mmio_read_t read_cb, void *user_data_read,
                    uc_cb_mmio_write_t write_cb, void *user_data_write)
 {
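uc_mmio_map() picks up the same uint64_t size so MMIO ranges follow the other mapping calls. A minimal sketch of registering callbacks on one page; the callback signatures follow the uc_cb_mmio_read_t/uc_cb_mmio_write_t typedefs in unicorn.h, and the device address is made up:

#include <unicorn/unicorn.h>
#include <stdio.h>

static uint64_t mmio_read(uc_engine *uc, uint64_t offset, unsigned size,
                          void *user_data)
{
    return 0xDEADBEEF; // value returned for any guest load
}

static void mmio_write(uc_engine *uc, uint64_t offset, unsigned size,
                       uint64_t value, void *user_data)
{
    printf("guest stored 0x%llx at offset 0x%llx\n",
           (unsigned long long)value, (unsigned long long)offset);
}

int main(void)
{
    uc_engine *uc;
    if (uc_open(UC_ARCH_X86, UC_MODE_64, &uc) != UC_ERR_OK)
        return 1;
    // size is now uint64_t, matching the other mapping calls
    uc_err err = uc_mmio_map(uc, 0xF0000000ULL, 0x1000, mmio_read, NULL,
                             mmio_write, NULL);
    uc_close(uc);
    return err == UC_ERR_OK ? 0 : 1;
}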
@@ -1117,10 +1109,10 @@ uc_err uc_mmio_map(uc_engine *uc, uint64_t address, size_t size,
 // Generally used in preparation for splitting a MemoryRegion.
 static uint8_t *copy_region(struct uc_struct *uc, MemoryRegion *mr)
 {
-    uint8_t *block = (uint8_t *)g_malloc0((size_t)int128_get64(mr->size));
+    uint8_t *block = (uint8_t *)g_malloc0((uint64_t)int128_get64(mr->size));
     if (block != NULL) {
         uc_err err =
-            uc_mem_read(uc, mr->addr, block, (size_t)int128_get64(mr->size));
+            uc_mem_read(uc, mr->addr, block, (uint64_t)int128_get64(mr->size));
         if (err != UC_ERR_OK) {
             free(block);
             block = NULL;
@@ -1136,10 +1128,10 @@ static uint8_t *copy_region(struct uc_struct *uc, MemoryRegion *mr)
    Note this function may be called recursively.
 */
 static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr,
-                              uint64_t address, size_t size, bool do_delete)
+                              uint64_t address, uint64_t size, bool do_delete)
 {
     uint64_t begin, end, chunk_end;
-    size_t l_size, r_size, m_size;
+    uint64_t l_size, r_size, m_size;
     mmio_cbs backup;
 
     chunk_end = address + size;
@@ -1166,7 +1158,7 @@ static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr,
      */
 
     // unmap this region first, then split it later
-    if (uc_mem_unmap(uc, mr->addr, (size_t)int128_get64(mr->size)) !=
+    if (uc_mem_unmap(uc, mr->addr, (uint64_t)int128_get64(mr->size)) !=
         UC_ERR_OK) {
         return false;
     }
@@ -1180,9 +1172,9 @@ static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr,
     }
 
     // compute sub region sizes
-    l_size = (size_t)(address - begin);
-    r_size = (size_t)(end - chunk_end);
-    m_size = (size_t)(chunk_end - address);
+    l_size = (uint64_t)(address - begin);
+    r_size = (uint64_t)(end - chunk_end);
+    m_size = (uint64_t)(chunk_end - address);
 
     if (l_size > 0) {
         if (uc_mmio_map(uc, begin, l_size, backup.read, backup.user_data_read,
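The three sub-region sizes are easier to check with concrete numbers. Assuming an existing region [begin, end) = [0x1000, 0x5000) and a split request for [address, chunk_end) = [0x2000, 0x3000), the arithmetic above yields:

// begin = 0x1000, end = 0x5000         (existing region)
// address = 0x2000, chunk_end = 0x3000 (range being carved out)
l_size = address - begin;     // 0x1000: left piece, remapped with old callbacks
m_size = chunk_end - address; // 0x1000: middle piece, the carved-out range
r_size = end - chunk_end;     // 0x2000: right piece, remapped with old callbacks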
@@ -1225,12 +1217,12 @@ static bool split_mmio_region(struct uc_struct *uc, MemoryRegion *mr,
 // TODO: investigate whether qemu region manipulation functions already offer
 // this capability
 static bool split_region(struct uc_struct *uc, MemoryRegion *mr,
-                         uint64_t address, size_t size, bool do_delete)
+                         uint64_t address, uint64_t size, bool do_delete)
 {
     uint8_t *backup;
     uint32_t perms;
     uint64_t begin, end, chunk_end;
-    size_t l_size, m_size, r_size;
+    uint64_t l_size, m_size, r_size;
     RAMBlock *block = NULL;
     bool prealloc = false;
@@ -1287,7 +1279,7 @@ static bool split_region(struct uc_struct *uc, MemoryRegion *mr,
     end = mr->end;
 
     // unmap this region first, then split it later
-    if (uc_mem_unmap(uc, mr->addr, (size_t)int128_get64(mr->size)) !=
+    if (uc_mem_unmap(uc, mr->addr, (uint64_t)int128_get64(mr->size)) !=
         UC_ERR_OK) {
         goto error;
     }
@@ -1308,9 +1300,9 @@ static bool split_region(struct uc_struct *uc, MemoryRegion *mr,
     }
 
     // compute sub region sizes
-    l_size = (size_t)(address - begin);
-    r_size = (size_t)(end - chunk_end);
-    m_size = (size_t)(chunk_end - address);
+    l_size = (uint64_t)(address - begin);
+    r_size = (uint64_t)(end - chunk_end);
+    m_size = (uint64_t)(chunk_end - address);
 
     // If there is an error in any of the below operations, things are too far
     // gone at that point to recover. Could try to remap original region, but
@@ -1378,13 +1370,13 @@ static bool split_region(struct uc_struct *uc, MemoryRegion *mr,
 }
 
 UNICORN_EXPORT
-uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size,
+uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, uint64_t size,
                       uint32_t perms)
 {
     MemoryRegion *mr;
     uint64_t addr = address;
     uint64_t pc;
-    size_t count, len;
+    uint64_t count, len;
     bool remove_exec = false;
 
     UC_INIT(uc);
@@ -1424,7 +1416,7 @@ uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size,
     count = 0;
     while (count < size) {
         mr = memory_mapping(uc, addr);
-        len = (size_t)MIN(size - count, mr->end - addr);
+        len = (uint64_t)MIN(size - count, mr->end - addr);
         if (mr->ram) {
             if (!split_region(uc, mr, addr, len, false)) {
                 return UC_ERR_NOMEM;
@@ -1466,11 +1458,11 @@ uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size,
 }
 
 UNICORN_EXPORT
-uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size)
+uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, uint64_t size)
 {
     MemoryRegion *mr;
     uint64_t addr;
-    size_t count, len;
+    uint64_t count, len;
 
     UC_INIT(uc);
@@ -1504,7 +1496,7 @@ uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size)
     count = 0;
     while (count < size) {
         mr = memory_mapping(uc, addr);
-        len = (size_t)MIN(size - count, mr->end - addr);
+        len = (uint64_t)MIN(size - count, mr->end - addr);
         if (!mr->ram) {
             if (!split_mmio_region(uc, mr, addr, len, true)) {
                 return UC_ERR_NOMEM;
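Taken together, uc_mem_protect() and uc_mem_unmap() use these split helpers to carve sub-ranges out of larger mappings on demand. A short usage sketch with the new 64-bit sizes (addresses are illustrative; address and size must stay page-aligned, which the API checks):

#include <unicorn/unicorn.h>

int main(void)
{
    uc_engine *uc;
    if (uc_open(UC_ARCH_X86, UC_MODE_64, &uc) != UC_ERR_OK)
        return 1;

    uc_mem_map(uc, 0x1000, 0x4000, UC_PROT_ALL); // one 16 KiB region

    // make the middle page read-only: split_region() carves
    // [0x2000, 0x3000) out of the original mapping
    uc_mem_protect(uc, 0x2000, 0x1000, UC_PROT_READ);

    // unmap the last page: another split, then removal
    uc_mem_unmap(uc, 0x4000, 0x1000);

    uc_close(uc);
    return 0;
}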