Team Fortress 2 Source Code as on 22/4/2020
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

632 lines
17 KiB

  1. #include <assert.h>
  2. #include <stdio.h>
  3. #include <limits.h>
  4. #include <stdlib.h>
  5. #define FAST_CHUNK // disabling this enables the old, slower path that deblocks into a regular form
  6. #include "cave_parse.h"
  7. #include "stb_image.h"
  8. #include "stb.h"
  9. #define NUM_CHUNKS_PER_REGION 32 // only on one axis
  10. #define NUM_CHUNKS_PER_REGION_LOG2 5
  11. #define NUM_COLUMNS_PER_CHUNK 16
  12. #define NUM_COLUMNS_PER_CHUNK_LOG2 4
  13. uint32 read_uint32_be(FILE *f)
  14. {
  15. unsigned char data[4];
  16. fread(data, 1, 4, f);
  17. return (data[0]<<24) + (data[1]<<16) + (data[2]<<8) + data[3];
  18. }
// One chunk's raw (still zlib-compressed) bytes as read from a region file.
typedef struct
{
   uint8 *data;   // malloc'd compressed bytes; NULL when the chunk is absent
   size_t len;    // size of data in bytes; 0 when the chunk is absent
   int x,z; // chunk index
   int refcount; // for multi-threading
} compressed_chunk;
// Header of one region file: the 32x32 table of chunk location codes.
typedef struct
{
   int x,z;   // region coordinates (one region = 32x32 chunks)
   // per-chunk location code: (first_sector << 8) | sector_count; 0 = chunk absent
   uint32 sector_data[NUM_CHUNKS_PER_REGION][NUM_CHUNKS_PER_REGION];
} region;
size_t cached_compressed=0;   // NOTE(review): not referenced in this file -- possibly used elsewhere
// One-entry cache of the most recently opened region file.
FILE *last_region;            // open handle, or NULL if the open failed
int last_region_x;            // region coords the cached handle refers to
int last_region_z;
int opened=0;                 // nonzero once last_region_* are meaningful (even for a failed open)
  36. static void open_file(int reg_x, int reg_z)
  37. {
  38. if (!opened || last_region_x != reg_x || last_region_z != reg_z) {
  39. char filename[256];
  40. if (last_region != NULL)
  41. fclose(last_region);
  42. sprintf(filename, "r.%d.%d.mca", reg_x, reg_z);
  43. last_region = fopen(filename, "rb");
  44. last_region_x = reg_x;
  45. last_region_z = reg_z;
  46. opened = 1;
  47. }
  48. }
  49. static region *load_region(int reg_x, int reg_z)
  50. {
  51. region *r;
  52. int x,z;
  53. open_file(reg_x, reg_z);
  54. r = malloc(sizeof(*r));
  55. if (last_region == NULL) {
  56. memset(r, 0, sizeof(*r));
  57. } else {
  58. fseek(last_region, 0, SEEK_SET);
  59. for (z=0; z < NUM_CHUNKS_PER_REGION; ++z)
  60. for (x=0; x < NUM_CHUNKS_PER_REGION; ++x)
  61. r->sector_data[z][x] = read_uint32_be(last_region);
  62. }
  63. r->x = reg_x;
  64. r->z = reg_z;
  65. return r;
  66. }
// Release a region table allocated by load_region.
void free_region(region *r)
{
   free(r);
}
#define MAX_MAP_REGIONS 64 // in one axis: 64 regions * 32 chunk/region * 16 columns/chunk = 16384 columns
// Direct-mapped cache of region tables, indexed by the low bits of the region coords.
region *regions[MAX_MAP_REGIONS][MAX_MAP_REGIONS];
  73. static region *get_region(int reg_x, int reg_z)
  74. {
  75. int slot_x = reg_x & (MAX_MAP_REGIONS-1);
  76. int slot_z = reg_z & (MAX_MAP_REGIONS-1);
  77. region *r;
  78. r = regions[slot_z][slot_x];
  79. if (r) {
  80. if (r->x == reg_x && r->z == reg_z)
  81. return r;
  82. free_region(r);
  83. }
  84. r = load_region(reg_x, reg_z);
  85. regions[slot_z][slot_x] = r;
  86. return r;
  87. }
// about one region, so size should be ok
#define NUM_CACHED_X 64
#define NUM_CACHED_Z 64
// @TODO: is it really worth caching these? we probably can just
// pull them from the disk cache nearly as efficiently.
// Can test that by setting to 1x1?
// Direct-mapped cache of compressed chunks; each occupied slot holds one refcount.
compressed_chunk *cached_chunk[NUM_CACHED_Z][NUM_CACHED_X];
  95. static void deref_compressed_chunk(compressed_chunk *cc)
  96. {
  97. assert(cc->refcount > 0);
  98. --cc->refcount;
  99. if (cc->refcount == 0) {
  100. if (cc->data)
  101. free(cc->data);
  102. free(cc);
  103. }
  104. }
// Return the (still-compressed) data for chunk (chunk_x, chunk_z) via a
// direct-mapped cache. The cache itself owns one reference on the
// returned chunk; callers that keep it past the next call must take
// their own reference.
static compressed_chunk *get_compressed_chunk(int chunk_x, int chunk_z)
{
   int slot_x = chunk_x & (NUM_CACHED_X-1);
   int slot_z = chunk_z & (NUM_CACHED_Z-1);
   compressed_chunk *cc = cached_chunk[slot_z][slot_x];
   if (cc && cc->x == chunk_x && cc->z == chunk_z)
      return cc;   // cache hit
   else {
      int reg_x = chunk_x >> NUM_CHUNKS_PER_REGION_LOG2;
      int reg_z = chunk_z >> NUM_CHUNKS_PER_REGION_LOG2;
      region *r = get_region(reg_x, reg_z);
      if (cc) {
         // collision: drop the cache's reference to the old entry
         deref_compressed_chunk(cc);
         cached_chunk[slot_z][slot_x] = NULL;
      }
      cc = malloc(sizeof(*cc));
      cc->x = chunk_x;
      cc->z = chunk_z;
      {
         int subchunk_x = chunk_x & (NUM_CHUNKS_PER_REGION-1);
         int subchunk_z = chunk_z & (NUM_CHUNKS_PER_REGION-1);
         // location code: (first_sector << 8) | sector_count; 0 => chunk absent
         uint32 code = r->sector_data[subchunk_z][subchunk_x];
         if (code & 255) {
            open_file(reg_x, reg_z);
            fseek(last_region, (code>>8)*4096, SEEK_SET);   // sectors are 4KB
            cc->len = (code&255)*4096;
            cc->data = malloc(cc->len);
            // NOTE(review): fread result unchecked -- a truncated region
            // file would leave the tail of cc->data uninitialized
            fread(cc->data, 1, cc->len, last_region);
         } else {
            cc->len = 0;
            cc->data = 0;
         }
      }
      cc->refcount = 1;   // this single reference belongs to the cache
      cached_chunk[slot_z][slot_x] = cc;
      return cc;
   }
}
  143. // NBT parser -- can automatically parse stuff we don't
  144. // have definitions for, but want to explicitly parse
  145. // stuff we do have definitions for.
  146. //
  147. // option 1: auto-parse everything into data structures,
  148. // then read those
  149. //
  150. // option 2: have a "parse next object" which
  151. // doesn't resolve whether it expands its children
  152. // yet, and then the user either says "expand" or
  153. // "skip" after looking at the name. Anything with
  154. // "children" without names can't go through this
  155. // interface.
  156. //
  157. // Let's try option 2.
// Cursor over an in-memory NBT (Named Binary Tag) buffer.
typedef struct
{
   unsigned char *buffer_start;   // first byte of the decoded NBT data
   unsigned char *buffer_end;     // one past the last byte (bounds check)
   unsigned char *cur;            // current parse position
   int nesting;                   // compound/list depth, for sanity checking
   char temp_buffer[256];         // holds the name returned by nbt_peek
} nbt;
// NBT tag ids, matching the Minecraft NBT format.
enum { TAG_End=0, TAG_Byte=1, TAG_Short=2, TAG_Int=3, TAG_Long=4,
       TAG_Float=5, TAG_Double=6, TAG_Byte_Array=7, TAG_String=8,
       TAG_List=9, TAG_Compound=10, TAG_Int_Array=11 };
  169. static void nbt_get_string_data(unsigned char *data, char *buffer, size_t bufsize)
  170. {
  171. int len = data[0]*256 + data[1];
  172. int i;
  173. for (i=0; i < len && i+1 < (int) bufsize; ++i)
  174. buffer[i] = (char) data[i+2];
  175. buffer[i] = 0;
  176. }
  177. static char *nbt_peek(nbt *n)
  178. {
  179. unsigned char type = *n->cur;
  180. if (type == TAG_End)
  181. return NULL;
  182. nbt_get_string_data(n->cur+1, n->temp_buffer, sizeof(n->temp_buffer));
  183. return n->temp_buffer;
  184. }
  185. static uint32 nbt_parse_uint32(unsigned char *buffer)
  186. {
  187. return (buffer[0] << 24) + (buffer[1]<<16) + (buffer[2]<<8) + buffer[3];
  188. }
static void nbt_skip(nbt *n);
// skip an item that doesn't have an id or name prefix (usable in lists)
// Advances n->cur past one payload of the given tag type; recurses for
// lists and compounds.
static void nbt_skip_raw(nbt *n, unsigned char type)
{
   switch (type) {
      // fixed-size scalars
      case TAG_Byte  : n->cur += 1; break;
      case TAG_Short : n->cur += 2; break;
      case TAG_Int   : n->cur += 4; break;
      case TAG_Long  : n->cur += 8; break;
      case TAG_Float : n->cur += 4; break;
      case TAG_Double: n->cur += 8; break;
      // length-prefixed payloads: 4-byte count (arrays) or 2-byte count (strings)
      case TAG_Byte_Array: n->cur += 4 + 1*nbt_parse_uint32(n->cur); break;
      case TAG_Int_Array : n->cur += 4 + 4*nbt_parse_uint32(n->cur); break;
      case TAG_String    : n->cur += 2 + (n->cur[0]*256 + n->cur[1]); break;
      case TAG_List : {
         // 1-byte element type, 4-byte count, then `count` unnamed payloads
         unsigned char list_type = *n->cur++;
         unsigned int list_len = nbt_parse_uint32(n->cur);
         unsigned int i;
         n->cur += 4; // list_len
         for (i=0; i < list_len; ++i)
            nbt_skip_raw(n, list_type);
         break;
      }
      case TAG_Compound : {
         // named children until a TAG_End byte
         while (*n->cur != TAG_End)
            nbt_skip(n);
         nbt_skip(n); // skip the TAG_end
         break;
      }
   }
   assert(n->cur <= n->buffer_end);
}
  221. static void nbt_skip(nbt *n)
  222. {
  223. unsigned char type = *n->cur++;
  224. if (type == TAG_End)
  225. return;
  226. // skip name
  227. n->cur += (n->cur[0]*256 + n->cur[1]) + 2;
  228. nbt_skip_raw(n, type);
  229. }
  230. // byteswap
  231. static void nbt_swap(unsigned char *ptr, int len)
  232. {
  233. int i;
  234. for (i=0; i < (len>>1); ++i) {
  235. unsigned char t = ptr[i];
  236. ptr[i] = ptr[len-1-i];
  237. ptr[len-1-i] = t;
  238. }
  239. }
// pass in the expected type, fail if doesn't match
// returns a pointer to the data, byteswapped if appropriate
// NOTE: multi-byte values are byteswapped IN PLACE in the buffer, so
// each item must only be fetched once.
static void *nbt_get_fromlist(nbt *n, unsigned char type, int *len)
{
   unsigned char *ptr;
   assert(type != TAG_Compound);
   assert(type != TAG_List); // we could support getting lists of primitives as if they were arrays, but eh
   if (len) *len = 1;   // scalars report a length of 1
   ptr = n->cur;
   switch (type) {
      case TAG_Byte  : break;
      case TAG_Short : nbt_swap(ptr, 2); break;
      case TAG_Int   : nbt_swap(ptr, 4); break;
      case TAG_Long  : nbt_swap(ptr, 8); break;
      case TAG_Float : nbt_swap(ptr, 4); break;
      case TAG_Double: nbt_swap(ptr, 8); break;
      case TAG_Byte_Array:
         // NOTE(review): the array cases assume len != NULL
         *len = nbt_parse_uint32(ptr);
         ptr += 4;   // returned pointer starts after the length prefix
         break;
      case TAG_Int_Array: {
         int i;
         *len = nbt_parse_uint32(ptr);
         ptr += 4;
         // swap each 4-byte element to host order, in place
         for (i=0; i < *len; ++i)
            nbt_swap(ptr + 4*i, 4);
         break;
      }
      default: assert(0); // unhandled case
   }
   // advance the cursor past the item just returned
   nbt_skip_raw(n, type);
   return ptr;
}
  273. static void *nbt_get(nbt *n, unsigned char type, int *len)
  274. {
  275. assert(n->cur[0] == type);
  276. n->cur += 3 + (n->cur[1]*256+n->cur[2]);
  277. return nbt_get_fromlist(n, type, len);
  278. }
  279. static void nbt_begin_compound(nbt *n) // start a compound
  280. {
  281. assert(*n->cur == TAG_Compound);
  282. // skip header
  283. n->cur += 3 + (n->cur[1]*256 + n->cur[2]);
  284. ++n->nesting;
  285. }
// Enter a compound that appears as a bare list element -- list elements
// carry no id/name header, so there is nothing to skip.
static void nbt_begin_compound_in_list(nbt *n) // start a compound
{
   ++n->nesting;
}
  290. static void nbt_end_compound(nbt *n) // end a compound
  291. {
  292. assert(*n->cur == TAG_End);
  293. assert(n->nesting != 0);
  294. ++n->cur;
  295. --n->nesting;
  296. }
// @TODO no interface to get lists from lists
// Enter a named list whose elements are of the given type.
// Returns the element count, or -1 on a type mismatch (cursor unmoved,
// so the caller can nbt_skip() the whole tag instead).
static int nbt_begin_list(nbt *n, unsigned char type)
{
   uint32 len;
   unsigned char *ptr;
   // start of payload: skip tag byte + 2-byte name length + name
   ptr = n->cur + 3 + (n->cur[1]*256 + n->cur[2]);
   if (ptr[0] != type)
      return -1;
   n->cur = ptr;
   len = nbt_parse_uint32(n->cur+1);
   assert(n->cur[0] == type);
   // @TODO keep a stack with the count to make sure they do it right
   ++n->nesting;
   n->cur += 5;   // element-type byte + 4-byte count
   return (int) len;
}
// Leave a list (lists have no terminator byte, only the up-front count).
static void nbt_end_list(nbt *n)
{
   --n->nesting;
}
  317. // raw_block chunk is 16x256x16x4 = 2^(4+8+4+2) = 256KB
  318. //
  319. // if we want to process 64x64x256 at a time, that will be:
  320. // 4*4*256KB => 4MB per area in raw_block
  321. //
  322. // (plus we maybe need to decode adjacent regions)
#ifdef FAST_CHUNK
typedef fast_chunk parse_chunk;   // fast path: keeps pointers into the decoded NBT buffer
#else
typedef chunk parse_chunk;        // slow path: deblocked into a regular raw_block array
#endif
// Parse one decompressed chunk's NBT into a parse_chunk.
// With FAST_CHUNK the result just records pointers into `data`, and the
// chunk takes ownership of `data` via pointer_to_free; otherwise the
// sections are deblocked into c->rb. Returns NULL if no "Level"
// compound is found (in which case the caller still owns `data`).
static parse_chunk *minecraft_chunk_parse(unsigned char *data, size_t len)
{
   char *s;
   parse_chunk *c = NULL;
   nbt n_store, *n = &n_store;
   n->buffer_start = data;
   n->buffer_end = data + len;
   n->cur = n->buffer_start;
   n->nesting = 0;
   nbt_begin_compound(n);
   while ((s = nbt_peek(n)) != NULL) {
      if (!strcmp(s, "Level")) {
         int *height;   // NOTE(review): assigned from "HeightMap" below but never read
         c = malloc(sizeof(*c));
#ifdef FAST_CHUNK
         memset(c, 0, sizeof(*c));
         c->pointer_to_free = data;   // chunk now owns the NBT buffer
#else
         c->rb[15][15][255].block = 0;
#endif
         c->max_y = 0;
         nbt_begin_compound(n);
         while ((s = nbt_peek(n)) != NULL) {
            if (!strcmp(s, "xPos"))
               c->xpos = *(int *) nbt_get(n, TAG_Int, 0);
            else if (!strcmp(s, "zPos"))
               c->zpos = *(int *) nbt_get(n, TAG_Int, 0);
            else if (!strcmp(s, "Sections")) {
               int count = nbt_begin_list(n, TAG_Compound), i;
               if (count == -1) {
                  // this not-a-list case happens in The End and I'm not sure
                  // what it means... possibly one of those silly encodings
                  // where it's not encoded as a list if there's only one?
                  // not worth figuring out
                  nbt_skip(n);
                  count = -1;
               }
               for (i=0; i < count; ++i) {
                  // yi: vertical section index (y/16); len: array-length out-param.
                  // NOTE(review): `data` below shadows the function parameter.
                  int yi, len;
                  uint8 *light = NULL, *blocks = NULL, *data = NULL, *skylight = NULL;
                  nbt_begin_compound_in_list(n);
                  while ((s = nbt_peek(n)) != NULL) {
                     if (!strcmp(s, "Y"))
                        yi = * (uint8 *) nbt_get(n, TAG_Byte, 0);
                     else if (!strcmp(s, "BlockLight")) {
                        light = nbt_get(n, TAG_Byte_Array, &len);
                        assert(len == 2048);   // 16*16*16 blocks, one nibble each
                     } else if (!strcmp(s, "Blocks")) {
                        blocks = nbt_get(n, TAG_Byte_Array, &len);
                        assert(len == 4096);   // 16*16*16 blocks, one byte each
                     } else if (!strcmp(s, "Data")) {
                        data = nbt_get(n, TAG_Byte_Array, &len);
                        assert(len == 2048);
                     } else if (!strcmp(s, "SkyLight")) {
                        skylight = nbt_get(n, TAG_Byte_Array, &len);
                        assert(len == 2048);
                     }
                  }
                  nbt_end_compound(n);
                  // NOTE(review): yi is uninitialized if a section had no "Y" tag
                  assert(yi < 16);
#ifndef FAST_CHUNK
                  // clear data below current max_y
                  {
                     int x,z;
                     // sections can be sparse; zero-fill any skipped ones
                     while (c->max_y < yi*16) {
                        for (x=0; x < 16; ++x)
                           for (z=0; z < 16; ++z)
                              c->rb[z][x][c->max_y].block = 0;
                        ++c->max_y;
                     }
                  }
                  // now assemble the data
                  {
                     // o2 indexes the nibble arrays, o4 the byte array;
                     // each iteration handles two horizontally-adjacent blocks
                     int x,y,z, o2=0,o4=0;
                     for (y=0; y < 16; ++y) {
                        for (z=0; z < 16; ++z) {
                           for (x=0; x < 16; x += 2) {
                              raw_block *rb = &c->rb[15-z][x][y + yi*16]; // 15-z because switching to z-up will require flipping an axis
                              rb[0].block = blocks[o4];
                              rb[0].light = light[o2] & 15;
                              rb[0].data = data[o2] & 15;
                              rb[0].skylight = skylight[o2] & 15;
                              rb[256].block = blocks[o4+1];
                              rb[256].light = light[o2] >> 4;
                              rb[256].data = data[o2] >> 4;
                              rb[256].skylight = skylight[o2] >> 4;
                              o2 += 1;
                              o4 += 2;
                           }
                        }
                     }
                     c->max_y += 16;
                  }
#else
                  // fast path: just record the section pointers (may be NULL)
                  c->blockdata[yi] = blocks;
                  c->data     [yi] = data;
                  c->light    [yi] = light;
                  c->skylight [yi] = skylight;
#endif
               }
               //nbt_end_list(n);
            } else if (!strcmp(s, "HeightMap")) {
               // NOTE(review): `len` here is the size_t function parameter
               // (the loop-local int len is out of scope), so &len passes a
               // size_t* where nbt_get expects int* -- confirm/clean up
               height = nbt_get(n, TAG_Int_Array, &len);
               assert(len == 256);
            } else
               nbt_skip(n);
         }
         nbt_end_compound(n);
      } else
         nbt_skip(n);
   }
   nbt_end_compound(n);
   assert(n->cur == n->buffer_end);
   return c;
}
#define MAX_DECODED_CHUNK_X 64
#define MAX_DECODED_CHUNK_Z 64
// One slot of the direct-mapped decoded-chunk cache.
typedef struct
{
   int cx,cz;        // chunk coordinates currently held in this slot
   fast_chunk *fc;   // decoded chunk, or NULL if the chunk doesn't exist
   int valid;        // nonzero once the slot has been filled at least once
} decoded_buffer;
static decoded_buffer decoded_buffers[MAX_DECODED_CHUNK_Z][MAX_DECODED_CHUNK_X];
// Provided elsewhere: guards the shared chunk caches across threads.
void lock_chunk_get_mutex(void);
void unlock_chunk_get_mutex(void);
#ifdef FAST_CHUNK
// Load, zlib-decompress, and NBT-parse chunk (chunk_x, chunk_z),
// bypassing the decoded-buffer cache. Returns NULL if the chunk is not
// present on disk. The shared compressed-chunk cache is only touched
// under the chunk-get mutex; the decompress/parse runs unlocked.
fast_chunk *get_decoded_fastchunk_uncached(int chunk_x, int chunk_z)
{
   unsigned char *decoded;
   compressed_chunk *cc;
   int inlen;
   int len;
   fast_chunk *fc;
   lock_chunk_get_mutex();
   cc = get_compressed_chunk(chunk_x, chunk_z);
   if (cc->len != 0)
      ++cc->refcount;   // keep cc alive while we work outside the lock
   unlock_chunk_get_mutex();
   if (cc->len == 0)
      return NULL;      // chunk absent from the region file
   assert(cc != NULL);
   assert(cc->data[4] == 2);   // compression type 2 = zlib
   // chunk payload: 4-byte big-endian length, 1-byte compression type, data
   inlen = nbt_parse_uint32(cc->data);
   decoded = stbi_zlib_decode_malloc_guesssize(cc->data+5, inlen, inlen*3, &len);
   assert(decoded != NULL);
   assert(len != 0);
   lock_chunk_get_mutex();
   deref_compressed_chunk(cc);
   unlock_chunk_get_mutex();
#ifdef FAST_CHUNK
   fc = minecraft_chunk_parse(decoded, len);
#else
   fc = NULL;
#endif
   if (fc == NULL)
      free(decoded);   // on success the fast_chunk owns `decoded`
   return fc;
}
  487. decoded_buffer *get_decoded_buffer(int chunk_x, int chunk_z)
  488. {
  489. decoded_buffer *db = &decoded_buffers[chunk_z&(MAX_DECODED_CHUNK_Z-1)][chunk_x&(MAX_DECODED_CHUNK_X-1)];
  490. if (db->valid) {
  491. if (db->cx == chunk_x && db->cz == chunk_z)
  492. return db;
  493. if (db->fc) {
  494. free(db->fc->pointer_to_free);
  495. free(db->fc);
  496. }
  497. }
  498. db->cx = chunk_x;
  499. db->cz = chunk_z;
  500. db->valid = 1;
  501. db->fc = 0;
  502. {
  503. db->fc = get_decoded_fastchunk_uncached(chunk_x, chunk_z);
  504. return db;
  505. }
  506. }
// Convenience wrapper: the cached decoded chunk (or NULL if absent).
fast_chunk *get_decoded_fastchunk(int chunk_x, int chunk_z)
{
   decoded_buffer *db = get_decoded_buffer(chunk_x, chunk_z);
   return db->fc;
}
#endif
#ifndef FAST_CHUNK
// Slow path: load, decompress, and fully deblock one chunk.
// Returns a malloc'd chunk the caller owns, or NULL if the chunk is
// absent. NOTE(review): unlike the FAST_CHUNK path, no mutex is taken
// here -- presumably this path is single-threaded; confirm.
chunk *get_decoded_chunk_raw(int chunk_x, int chunk_z)
{
   unsigned char *decoded;
   compressed_chunk *cc = get_compressed_chunk(chunk_x, chunk_z);
   assert(cc != NULL);
   if (cc->len == 0)
      return NULL;
   else {
      chunk *ch;
      // chunk payload: 4-byte big-endian length, 1-byte compression type, data
      int inlen = nbt_parse_uint32(cc->data);
      int len;
      assert(cc->data[4] == 2);   // compression type 2 = zlib
      decoded = stbi_zlib_decode_malloc_guesssize(cc->data+5, inlen, inlen*3, &len);
      assert(decoded != NULL);
#ifdef FAST_CHUNK
      ch = NULL;
#else
      ch = minecraft_chunk_parse(decoded, len);
#endif
      // the deblocked chunk copies everything it needs, so the NBT
      // buffer can be freed immediately
      free(decoded);
      return ch;
   }
}
// Direct-mapped cache of fully-deblocked chunks (slow path only).
static chunk *decoded_chunks[MAX_DECODED_CHUNK_Z][MAX_DECODED_CHUNK_X];
// Cached lookup of a deblocked chunk; evicts on slot collision.
// Returns NULL if the chunk doesn't exist on disk.
chunk *get_decoded_chunk(int chunk_x, int chunk_z)
{
   chunk *c = decoded_chunks[chunk_z&(MAX_DECODED_CHUNK_Z-1)][chunk_x&(MAX_DECODED_CHUNK_X-1)];
   if (c && c->xpos == chunk_x && c->zpos == chunk_z)
      return c;
   // collision: a chunk is a single allocation here, so one free suffices
   if (c) free(c);
   c = get_decoded_chunk_raw(chunk_x, chunk_z);
   decoded_chunks[chunk_z&(MAX_DECODED_CHUNK_Z-1)][chunk_x&(MAX_DECODED_CHUNK_X-1)] = c;
   return c;
}
#endif