You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

memory_ldst.inc.c 16KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580
  1. /*
  2. * Physical memory access templates
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. * Copyright (c) 2015 Linaro, Inc.
  6. * Copyright (c) 2016 Red Hat, Inc.
  7. *
  8. * This library is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2 of the License, or (at your option) any later version.
  12. *
  13. * This library is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  20. */
/* warning: addr must be aligned */
/*
 * Load a 32-bit value from a guest physical address.
 *
 * Template helper: SUFFIX, ARG1_DECL, TRANSLATE, RCU_READ_LOCK and
 * RCU_READ_UNLOCK are macros supplied by the file that includes this
 * template, so one body serves every accessor family.
 *
 * @addr:   guest physical address (must be 4-byte aligned)
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 * @endian: endianness the caller wants the returned value in
 *
 * Returns the 32-bit value (held in a 64-bit local because the MMIO
 * dispatch interface traffics in uint64_t).
 */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;          /* in: bytes needed; out: bytes available in mr */
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* Region too small for 4 bytes, or not directly mapped RAM. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        /* Dispatch produced target-order data; swap if the caller asked
         * for the opposite endianness. */
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case: read through the host mapping of the RAM block. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN: target byte order. */
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
/* 32-bit load in target-native byte order. */
uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

/* 32-bit load, little-endian result. */
uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

/* 32-bit load, big-endian result. */
uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
/*
 * Load a 64-bit value from a guest physical address.
 *
 * Identical structure to address_space_ldl_internal, but for an 8-byte
 * access (ldq_*_p / bswap64 / size 8 dispatch).
 *
 * @addr:   guest physical address (must be 8-byte aligned)
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 * @endian: endianness the caller wants the returned value in
 */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;          /* in: bytes needed; out: bytes available in mr */
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* Region too small for 8 bytes, or not directly mapped RAM. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        /* Dispatch produced target-order data; swap if the caller asked
         * for the opposite endianness. */
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case: read through the host mapping of the RAM block. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN: target byte order. */
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
/* 64-bit load in target-native byte order. */
uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

/* 64-bit load, little-endian result. */
uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

/* 64-bit load, big-endian result. */
uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}
/*
 * Load a single byte from a guest physical address.
 *
 * No endian variants and no alignment requirement: a one-byte access
 * has no byte order, so there is no _internal helper for it.
 *
 * @addr:   guest physical address
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 */
uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
    } else {
        /* RAM case: read through the host mapping of the RAM block. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
/* warning: addr must be aligned */
/*
 * Load a 16-bit value from a guest physical address.
 *
 * Identical structure to address_space_ldl_internal, but for a 2-byte
 * access (lduw_*_p / bswap16 / size 2 dispatch).
 *
 * @addr:   guest physical address (must be 2-byte aligned)
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 * @endian: endianness the caller wants the returned value in
 */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;          /* in: bytes needed; out: bytes available in mr */
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* Region too small for 2 bytes, or not directly mapped RAM. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        /* Dispatch produced target-order data; swap if the caller asked
         * for the opposite endianness. */
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case: read through the host mapping of the RAM block. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN: target byte order. */
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
/* 16-bit load in target-native byte order. */
uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

/* 16-bit load, little-endian result. */
uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

/* 16-bit load, big-endian result. */
uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
/*
 * Store a 32-bit value (target-native order) without marking the page
 * dirty for code invalidation: DIRTY_MEMORY_CODE is masked out of the
 * dirty-log bits before recording the write.
 *
 * @addr:   guest physical address (must be 4-byte aligned)
 * @val:    value to store, in target byte order
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;          /* in: bytes needed; out: bytes available in mr */
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        /* Record the write in all dirty-tracking clients except the
         * TCG code-invalidation one (that is the whole point of the
         * _notdirty variant). */
        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        /* NOTE(review): this uses 'addr' (the untranslated input address)
         * rather than 'addr1' (the offset within mr) to form the ram_addr,
         * while the other store paths in this file pass addr1 to
         * invalidate_and_set_dirty() — confirm this asymmetry is intended. */
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
/* warning: addr must be aligned */
/*
 * Store a 32-bit value to a guest physical address.
 *
 * Note the ordering difference from the load path: for MMIO the byte
 * swap happens BEFORE memory_region_dispatch_write(), converting the
 * caller's value into target order for the dispatcher.
 *
 * @addr:   guest physical address (must be 4-byte aligned)
 * @val:    value to store, in the byte order named by @endian
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 * @endian: byte order @val is expressed in
 */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;          /* in: bytes needed; out: bytes available in mr */
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        /* Convert the caller's byte order to target order for dispatch. */
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case: write through the host mapping of the RAM block. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN: target byte order. */
            stl_p(ptr, val);
            break;
        }
        /* Mark the range dirty and drop any translated code covering it. */
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
/* 32-bit store, value given in target-native byte order. */
void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

/* 32-bit store, value given little-endian. */
void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

/* 32-bit store, value given big-endian. */
void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}
/*
 * Store a single byte to a guest physical address.
 *
 * No endian variants and no alignment requirement: a one-byte access
 * has no byte order, so there is no _internal helper for it.
 *
 * @addr:   guest physical address
 * @val:    byte to store (low 8 bits used)
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 */
void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
    } else {
        /* RAM case: write through the host mapping of the RAM block. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        /* Mark the byte dirty and drop any translated code covering it. */
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
/* warning: addr must be aligned */
/*
 * Store a 16-bit value to a guest physical address.
 *
 * Same shape as address_space_stl_internal, for a 2-byte access; the
 * byte swap happens before dispatch on the MMIO path.
 *
 * @addr:   guest physical address (must be 2-byte aligned)
 * @val:    value to store, in the byte order named by @endian
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 * @endian: byte order @val is expressed in
 */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;          /* in: bytes needed; out: bytes available in mr */
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        /* Convert the caller's byte order to target order for dispatch. */
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case: write through the host mapping of the RAM block. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN: target byte order. */
            stw_p(ptr, val);
            break;
        }
        /* Mark the range dirty and drop any translated code covering it. */
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
/* 16-bit store, value given in target-native byte order. */
void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

/* 16-bit store, value given little-endian. */
void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

/* 16-bit store, value given big-endian. */
void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}
/*
 * Store a 64-bit value to a guest physical address.
 *
 * Same shape as address_space_stl_internal, for an 8-byte access.
 * NOTE(review): unlike its siblings this one carries no "addr must be
 * aligned" comment in the original; presumably the same alignment
 * requirement applies — confirm.
 *
 * @addr:   guest physical address
 * @val:    value to store, in the byte order named by @endian
 * @attrs:  memory transaction attributes
 * @result: if non-NULL, receives the MemTxResult of the access
 * @endian: byte order @val is expressed in
 */
static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;          /* in: bytes needed; out: bytes available in mr */
    hwaddr addr1;          /* offset of addr within mr */
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        /* Convert the caller's byte order to target order for dispatch. */
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
    } else {
        /* RAM case: write through the host mapping of the RAM block. */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            /* DEVICE_NATIVE_ENDIAN: target byte order. */
            stq_p(ptr, val);
            break;
        }
        /* Mark the range dirty and drop any translated code covering it. */
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        /* prepare_mmio_access() took the iothread lock for us. */
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
/* 64-bit store, value given in target-native byte order. */
void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

/* 64-bit store, value given little-endian. */
void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

/* 64-bit store, value given big-endian. */
void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}
/* Undefine the per-inclusion template parameters so this file can be
 * #included again with a different SUFFIX / address-space flavor. */
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK