
dump.c

/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "cpu.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-misc.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "hw/misc/vmcoreinfo.h"

#ifdef TARGET_X86_64
#include "win_dump.h"
#endif

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif

#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

#define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */

#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size)   \
    ((DIV_ROUND_UP((hdr_size), 4) +                     \
      DIV_ROUND_UP((name_size), 4) +                    \
      DIV_ROUND_UP((desc_size), 4)) * 4)
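
/*
 * ELF_NOTE_SIZE rounds each of the three parts up to 4-byte alignment and
 * sums them. Worked example (illustration only): a 12-byte note header, the
 * 5-byte name "CORE\0" and a 6-byte descriptor give (3 + 2 + 2) * 4 = 28
 * bytes on disk.
 */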
uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    g_free(s->guest_note);
    s->guest_note = NULL;
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }

    return 0;
}
static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -errno;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr =
        cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}
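
/*
 * Per-CPU note ids are written 1-based (id 0 is skipped); presumably
 * consumers of the notes treat 0 as an invalid or absent CPU id.
 */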
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
                             Error **errp)
{
    int ret;

    if (s->guest_note) {
        ret = f(s->guest_note, s->guest_note_size, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write guest note");
        }
    }
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf32_note(DumpState *s, Error **errp)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}
static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    /* write the selected header itself, not the address of the local pointer */
    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write section header table");
    }
}
static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}
static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}
/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
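
/*
 * Advance to the next block that intersects the dump range: returns 1 when
 * there are no more blocks, 0 when s->next_block and s->start have been set
 * to the next block to dump.
 */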
static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}
/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    dump_begin(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    dump_iterate(s, errp);
}
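
/*
 * In makedumpfile's flat format, the output is a stream of
 * (MakedumpfileDataHeader, data) pairs: each header records the offset the
 * following chunk occupies in the rebuilt file and its size, and a final
 * header with both fields set to END_FLAG_FLAT_HEADER terminates the stream
 * (see write_end_flat_header() below).
 */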
static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not enough */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}
/*
 * This function retrieves various sizes from an ELF note.
 *
 * @note has to be a valid ELF note. The return sizes are unmodified
 * (not padded or rounded up to be multiple of 4).
 */
static void get_note_sizes(DumpState *s, const void *note,
                           uint64_t *note_head_size,
                           uint64_t *name_size,
                           uint64_t *desc_size)
{
    uint64_t note_head_sz;
    uint64_t name_sz;
    uint64_t desc_sz;

    if (s->dump_info.d_class == ELFCLASS64) {
        const Elf64_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf64_Nhdr);
        name_sz = tswap64(hdr->n_namesz);
        desc_sz = tswap64(hdr->n_descsz);
    } else {
        const Elf32_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf32_Nhdr);
        name_sz = tswap32(hdr->n_namesz);
        desc_sz = tswap32(hdr->n_descsz);
    }

    if (note_head_size) {
        *note_head_size = note_head_sz;
    }
    if (name_size) {
        *name_size = name_sz;
    }
    if (desc_size) {
        *desc_size = desc_sz;
    }
}

static bool note_name_equal(DumpState *s,
                            const uint8_t *note, const char *name)
{
    int len = strlen(name) + 1;
    uint64_t head_size, name_size;

    get_note_sizes(s, note, &head_size, &name_size, NULL);
    head_size = ROUND_UP(head_size, 4);

    return name_size == len && memcmp(note + head_size, name, len) == 0;
}
/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write the common header; this is version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}
/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write the common header; this is version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}
static void write_dump_header(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    if (s->dump_info.d_class == ELFCLASS32) {
        create_header32(s, &local_err);
    } else {
        create_header64(s, &local_err);
    }
    error_propagate(errp, local_err);
}

static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}
/*
 * Set the dump_bitmap sequentially. Bits before last_pfn must not be
 * rewritten, so to set the first bit, pass 0 for both last_pfn and pfn.
 * set_dump_bitmap always leaves the most recently set bit un-synced; setting
 * bit (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
 * vmcore, i.e. it synchronizes the un-synced bits into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set a bit at a previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit to be set is not cached in buf, flush the data in buf
     * to vmcore first.
     * making new_offset bigger than old_offset also syncs the remaining data
     * into vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}
static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}
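
/*
 * Worked example (illustration only): with a 4 KiB target page size,
 * ctz32(0x1000) == 12, so assuming ARCH_PFN_OFFSET == 0 the physical
 * address 0x12345000 maps to pfn 0x12345 and back.
 */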
/*
 * Examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. Note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}
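
/*
 * Two copies of the bitmap end up in the vmcore because dump level 1 is
 * used: the 1st and 2nd dump_bitmap are identical (see set_dump_bitmap()).
 */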
static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-synced. Here
     * we set the remaining bits from last_pfn to the end of the bitmap buffer
     * to 0. With those set, the un-synced bit will be synchronized into the
     * vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}
static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size should not be less than size, otherwise dc will never be
     * enough
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}
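
/*
 * Worst-case size of one compressed page for the configured algorithm;
 * returns 0 for an unrecognized compression flag.
 */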
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}
static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * dump memory to vmcore page by page. all zero pages share the first
     * page of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not zero page, then:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get page desc of the compressed page and write it into the
             *    cache of page_desc
             *
             * only one compression format will be used here, for
             * s->flag_compress is set. But when compression fails to work,
             * we fall back to save in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                    (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                    (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                    (snappy_compress((char *)buf, s->dump_info.page_size,
                    (char *)buf_out, &size_out) == SNAPPY_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to save in plaintext, size_out should be
                 * assigned the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);
#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif
    g_free(buf_out);
}
static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                  :                       |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                  :                       |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}
static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}
/* calculate total size of memory to be dumped (taking filter into
 * account.) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}
static void vmcoreinfo_update_phys_base(DumpState *s)
{
    uint64_t size, note_head_size, name_size, phys_base;
    char **lines;
    uint8_t *vmci;
    size_t i;

    if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        return;
    }

    get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
    note_head_size = ROUND_UP(note_head_size, 4);

    vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
    *(vmci + size) = '\0';

    lines = g_strsplit((char *)vmci, "\n", -1);
    for (i = 0; lines[i]; i++) {
        const char *prefix = NULL;

        if (s->dump_info.d_machine == EM_X86_64) {
            prefix = "NUMBER(phys_base)=";
        } else if (s->dump_info.d_machine == EM_AARCH64) {
            prefix = "NUMBER(PHYS_OFFSET)=";
        }

        if (prefix && g_str_has_prefix(lines[i], prefix)) {
            if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
                              &phys_base) < 0) {
                warn_report("Failed to read %s", prefix);
            } else {
                s->dump_info.phys_base = phys_base;
            }
            break;
        }
    }

    g_strfreev(lines);
}
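
/*
 * Prepare the global dump state: stop the VM if it is running, collect
 * the guest-physical blocks and per-CPU notes, optionally copy the
 * guest's VMCOREINFO note out of guest memory, build the memory
 * mapping, and precompute the ELF or kdump-compressed layout.  On
 * failure, an error is set in errp and the partially initialized state
 * is released through dump_cleanup().
 */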
static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging,
                      bool has_filter, int64_t begin, int64_t length,
                      Error **errp)
{
    VMCoreInfoState *vmci = vmcoreinfo_find();
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* kdump-compressed conflicts with paging and filtering */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);
    s->total_size = dump_calculate_size(s);
#ifdef DEBUG_DUMP_GUEST_MEMORY
    fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
#endif

    /* it does not make sense to dump non-existent memory */
    if (!s->total_size) {
        error_setg(errp, "dump: no guest memory to dump");
        goto cleanup;
    }

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /*
     * The goal of this block is to (a) update the previously guessed
     * phys_base, (b) copy the guest note out of the guest.
     * Failure to do so is not fatal for dumping.
     */
    if (vmci) {
        uint64_t addr, note_head_size, name_size, desc_size;
        uint32_t size;
        uint16_t format;

        note_head_size = s->dump_info.d_class == ELFCLASS32 ?
            sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr);

        format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
        size = le32_to_cpu(vmci->vmcoreinfo.size);
        addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
        if (!vmci->has_vmcoreinfo) {
            warn_report("guest note is not present");
        } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
            warn_report("guest note size is invalid: %" PRIu32, size);
        } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) {
            warn_report("guest note format is unsupported: %" PRIu16, format);
        } else {
            s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
            cpu_physical_memory_read(addr, s->guest_note, size);

            get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
            s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
                                               desc_size);
            if (name_size > MAX_GUEST_NOTE_SIZE ||
                desc_size > MAX_GUEST_NOTE_SIZE ||
                s->guest_note_size > size) {
                warn_report("Invalid guest note header");
                g_free(s->guest_note);
                s->guest_note = NULL;
            } else {
                vmcoreinfo_update_phys_base(s);
                s->note_size += s->guest_note_size;
            }
        }
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);
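
    /*
     * One bitmap bit per page frame up to max_mapnr, with the byte
     * count rounded up to a whole number of dump pages.
     */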
    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return;

cleanup:
    dump_cleanup(s);
}

/*
 * Run the actual dump.  This operation might be time consuming: it is
 * invoked either synchronously from qmp_dump_guest_memory() or on a
 * detached worker thread, publishes the final status for query-dump,
 * and emits the DUMP_COMPLETED event when done.
 */
static void dump_process(DumpState *s, Error **errp)
{
    Error *local_err = NULL;
    DumpQueryResult *result = NULL;

    if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
#ifdef TARGET_X86_64
        create_win_dump(s, &local_err);
#endif
    } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, &local_err);
    } else {
        create_vmcore(s, &local_err);
    }

    /* make sure status is written after written_size updates */
    smp_wmb();
    atomic_set(&s->status,
               (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));

    /* send DUMP_COMPLETED message (unconditionally) */
    result = qmp_query_dump(NULL);
    /* should never fail */
    assert(result);
    qapi_event_send_dump_completed(result, !!local_err, (local_err ?
                                   error_get_pretty(local_err) : NULL));
    qapi_free_DumpQueryResult(result);

    error_propagate(errp, local_err);
    dump_cleanup(s);
}

static void *dump_thread(void *data)
{
    DumpState *s = (DumpState *)data;
    dump_process(s, NULL);
    return NULL;
}
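
/*
 * query-dump: written_size is updated concurrently by the dump thread,
 * so it is read after status; the read barrier pairs with the write
 * barrier in dump_process().
 */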
DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;

    result->status = atomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;
    return result;
}
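
/*
 * QMP handler for dump-guest-memory.  A minimal synchronous invocation
 * could look like this (purely illustrative):
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false,
 *                    "protocol": "file:/tmp/vmcore" } }
 *
 * "protocol" accepts "fd:<name>" (a file descriptor previously passed
 * to the monitor) or "file:<path>"; "begin"/"length" restrict the dump
 * to a physical address range, and "detach" runs it on a background
 * thread.
 */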
void qmp_dump_guest_memory(bool paging, const char *file,
                           bool has_detach, bool detach,
                           bool has_begin, int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* if there is a dump in background, we should wait until the dump
     * has finished */
    if (dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    /*
     * The kdump-compressed format needs the whole memory dumped, so
     * paging or filtering is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    if (has_detach) {
        detach_p = detach;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#ifndef TARGET_X86_64
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
        error_setg(errp, "Windows dump is only available for x86-64");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = &dump_state_global;
    dump_state_prepare(s);

    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        atomic_set(&s->status, DUMP_STATUS_FAILED);
        return;
    }

    if (detach_p) {
        /* detached dump */
        s->detached = true;
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
                           s, QEMU_THREAD_DETACHED);
    } else {
        /* sync dump */
        dump_process(s, errp);
    }
}
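
/*
 * query-dump-guest-memory-capability: report which dump formats this
 * binary supports.  ELF and kdump-zlib are unconditional; the others
 * depend on build configuration (CONFIG_LZO, CONFIG_SNAPPY) and target
 * (TARGET_X86_64 for Windows dumps).
 */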
DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
        g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    /* Windows dump is available only if target is x86_64 */
#ifdef TARGET_X86_64
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_WIN_DMP;
#endif

    return cap;
}