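/*
 * Implementation of the public bless_buffer_t functions for creating,
 * saving and freeing buffers.
 */
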
#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>

#include "buffer.h"
#include "buffer_options.h"
#include "buffer_internal.h"
#include "segcol_list.h"
#include "data_object.h"
#include "data_object_file.h"
#include "overlap_graph.h"
#include "list.h"
#include "buffer_util.h"
#include "debug.h"
#include "util.h"
#include "type_limits.h"

#pragma GCC visibility push(default)

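/*
 * Checks whether the file referenced by 'fd' can be resized, i.e. whether it
 * is a regular file. The result (1 or 0) is stored in *fd_resizable.
 */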
static int is_fd_resizable(int fd, int *fd_resizable)
{
	struct stat st;

	if (fstat(fd, &st) == -1)
		return_error(errno);

	if (S_ISREG(st.st_mode))
		*fd_resizable = 1;
	else
		*fd_resizable = 0;

	return 0;
}

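/*
 * Makes sure that the file behind 'fd' occupies at least 'size' bytes on
 * disk, either with posix_fallocate() when available or by appending
 * zero-filled blocks until the requested size is reached.
 */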
static int reserve_disk_space(int fd, off_t size)
{
#ifdef HAVE_POSIX_FALLOCATE
	int err = posix_fallocate(fd, 0, size);
	if (err)
		return_error(err);

	return 0;
#else
	/* Find the current size of the file */
	off_t cur_size = lseek(fd, 0, SEEK_END);

	if (cur_size == -1)
		return_error(errno);

	if (cur_size >= size)
		return 0;

	/* Append zero-filled blocks until the file reaches the requested size */
	size_t block_size = 4096;

	void *zero = calloc(1, block_size);
	if (zero == NULL)
		return_error(ENOMEM);

	off_t bytes_left = size - cur_size;

	while (bytes_left > 0) {
		off_t nwrite = bytes_left >= (off_t)block_size
			? (off_t)block_size : bytes_left;

		ssize_t written = write(fd, zero, nwrite);
		if (written == -1) {
			int err = errno;
			free(zero);
			return_error(err);
		}

		bytes_left -= written;
	}

	free(zero);

	return 0;
#endif
}

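/*
 * Creates an overlap graph containing the segments of 'segcol' that come
 * from the file data object 'fd_obj', along with their mappings in the
 * buffer. Segments from other data objects are ignored because they cannot
 * overlap with the file being saved.
 */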
static int create_overlap_graph(overlap_graph_t **g, segcol_t *segcol,
		data_object_t *fd_obj)
{
	int err = overlap_graph_new(g, 10);
	if (err)
		return_error(err);

	/* Iterate over all segments in the segcol */
	segcol_iter_t *iter;
	err = segcol_iter_new(segcol, &iter);
	if (err) {
		overlap_graph_free(*g);
		return_error(err);
	}

	int valid;

	while (!segcol_iter_is_valid(iter, &valid) && valid) {
		segment_t *seg;
		segcol_iter_get_segment(iter, &seg);

		data_object_t *dobj;
		segment_get_data(seg, (void **)&dobj);

		/*
		 * Add to the graph only the segments that belong to the file
		 * data object we are saving to; other segments cannot overlap
		 * with it.
		 */
		int result;
		data_object_compare(&result, fd_obj, dobj);

		if (result == 0) {
			off_t mapping;
			segcol_iter_get_mapping(iter, &mapping);
			overlap_graph_add_segment(*g, seg, mapping);
		}

		segcol_iter_next(iter);
	}

	segcol_iter_free(iter);

	return 0;
}

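/*
 * Breaks an edge of the overlap graph by storing the overlapping range of
 * the buffer in memory, or in a temporary file under 'tmpdir' if there is
 * not enough memory available.
 */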
static int break_edge(segcol_t *segcol, struct edge_entry *edge, char *tmpdir)
{
	off_t src_start;
	segment_get_start(edge->src, &src_start);

	data_object_t *dst_dobj;
	segment_get_data(edge->dst, (void **)&dst_dobj);

	/* Calculate the offset where the overlap starts */
	off_t overlap_offset;

	if (edge->dst_mapping < src_start)
		overlap_offset = src_start;
	else
		overlap_offset = edge->dst_mapping;

	/*
	 * Store the overlapping range in memory; fall back to a temporary
	 * file under tmpdir if there is not enough memory.
	 */
	int err = segcol_store_in_memory(segcol, overlap_offset,
			edge->weight);

	if (err == ENOMEM) {
		err = segcol_store_in_file(segcol, overlap_offset,
				edge->weight, tmpdir);
	}

	if (err)
		return_error(err);

	return 0;
}

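/*
 * Writes a segment to the file at offset 'mapping'. 'overlap' is the size of
 * the segment's overlap with itself (its self-loop weight in the overlap
 * graph); when the segment overlaps with itself the safe write path is used
 * where necessary.
 */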
static int write_segment(int fd, segment_t *segment, off_t mapping, off_t overlap)
{
	int err;

	data_object_t *dobj;
	segment_get_data(segment, (void **)&dobj);

	off_t seg_start;
	segment_get_start(segment, &seg_start);

	off_t seg_size;
	segment_get_size(segment, &seg_size);

	off_t nwrite = seg_size;

	/*
	 * If the segment overlaps with itself and its data moves towards the
	 * end of the file (mapping >= seg_start), use the safe write path so
	 * that source data is not overwritten before it has been read. If the
	 * mapping equals the segment start the data is already in place and
	 * nothing needs to be written.
	 */
	if (overlap > 0 && mapping >= seg_start) {
		if (mapping == seg_start)
			return 0;

		err = write_data_object_safe(dobj, seg_start, nwrite, fd, mapping);
		if (err)
			return_error(err);
	}
	else {
		err = write_data_object(dobj, seg_start, nwrite, fd, mapping);
		if (err)
			return_error(err);
	}

	return 0;
}

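/*
 * Writes to the file all the segments of 'segcol' that do not come from the
 * file data object 'fd_obj' itself; the file's own segments are written
 * separately by the caller.
 */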
static int write_segcol_rest(int fd, segcol_t *segcol, data_object_t *fd_obj)
{
	segcol_iter_t *iter;
	int err = segcol_iter_new(segcol, &iter);
	if (err)
		return_error(err);

	int valid;

	while (!segcol_iter_is_valid(iter, &valid) && valid) {
		segment_t *seg;
		segcol_iter_get_segment(iter, &seg);

		data_object_t *dobj;
		segment_get_data(seg, (void **)&dobj);

		int result;
		data_object_compare(&result, fd_obj, dobj);

		/*
		 * Write only the segments that do not come from the file data
		 * object itself.
		 */
		if (result == 1) {
			off_t mapping;
			segcol_iter_get_mapping(iter, &mapping);
			err = write_segment(fd, seg, mapping, 0);
			if (err) {
				segcol_iter_free(iter);
				return_error(err);
			}
		}

		segcol_iter_next(iter);
	}

	segcol_iter_free(iter);

	return 0;
}

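/*
 * Makes the buffer actions in the undo and redo lists of 'buf' keep private
 * copies of any data they own that belongs to data object 'obj'. If 'del' is
 * zero the function aborts with an error on the first action that cannot
 * make a private copy. If 'del' is non-zero the offending action is removed
 * instead, together with the remaining actions in the iteration (which can
 * no longer be applied without it), and the error is reported after both
 * lists have been processed.
 */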
static int actions_make_private_copy(bless_buffer_t *buf, data_object_t *obj,
		int del)
{
	int err;
	struct list_node *node;
	struct list_node *tmp;
	int undo_err = 0;
	int redo_err = 0;

	/*
	 * Make the actions in the undo list keep private copies of their
	 * data, starting from the most recent action.
	 */
	list_for_each_reverse_safe(list_tail(buf->undo_list)->prev, node, tmp) {
		struct buffer_action_entry *entry =
			list_entry(node, struct buffer_action_entry, ln);

		if (undo_err) {
			list_delete_chain(node, node);
			buffer_action_free(entry->action);
			free(entry);
		}
		else
			err = buffer_action_private_copy(entry->action, obj);

		/*
		 * On the first failure either abort (del == 0), or remember
		 * the error and start removing actions (del != 0).
		 */
		if (!undo_err && err) {
			if (!del)
				return_error(err);
			undo_err = err;
			list_delete_chain(node, node);
			buffer_action_free(entry->action);
			free(entry);
		}
	}

	/* Do the same for the actions in the redo list */
	list_for_each_safe(list_head(buf->redo_list)->next, node, tmp) {
		struct buffer_action_entry *entry =
			list_entry(node, struct buffer_action_entry, ln);

		if (redo_err) {
			list_delete_chain(node, node);
			buffer_action_free(entry->action);
			free(entry);
		}
		else
			err = buffer_action_private_copy(entry->action, obj);

		if (!redo_err && err) {
			if (!del)
				return_error(err);
			redo_err = err;
			list_delete_chain(node, node);
			buffer_action_free(entry->action);
			free(entry);
		}
	}

	if (undo_err)
		return_error(undo_err);

	if (redo_err)
		return_error(redo_err);

	return 0;
}

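/*
 * Allocates a struct buffer_options and initializes it with the default
 * option values.
 */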
static int buffer_options_new(struct buffer_options **opts)
{
	*opts = malloc(sizeof(**opts));
	if (*opts == NULL)
		return_error(ENOMEM);

	/* Fill in the default option values */
	(*opts)->tmp_dir = strdup("/tmp");
	if ((*opts)->tmp_dir == NULL) {
		free(*opts);
		return_error(ENOMEM);
	}

	(*opts)->undo_limit = __MAX(size_t);

	(*opts)->undo_limit_str = strdup("infinite");
	if ((*opts)->undo_limit_str == NULL) {
		free((*opts)->tmp_dir);
		free(*opts);
		return_error(ENOMEM);
	}

	(*opts)->undo_after_save = strdup("best_effort");
	if ((*opts)->undo_after_save == NULL) {
		free((*opts)->undo_limit_str);
		free((*opts)->tmp_dir);
		free(*opts);
		return_error(ENOMEM);
	}

	return 0;
}

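/*
 * Frees a struct buffer_options along with the option strings it owns.
 */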
static int buffer_options_free(struct buffer_options *opts)
{
	free(opts->tmp_dir);
	free(opts->undo_limit_str);
	free(opts->undo_after_save);
	free(opts);

	return 0;
}

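/*
 * Frees the entries of a list of struct vertex_entry nodes along with the
 * list itself.
 */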
static void free_vertex_list(list_t *list)
{
	struct list_node *node;
	struct list_node *tmp;

	list_for_each_safe(list_head(list)->next, node, tmp) {
		struct vertex_entry *entry =
			list_entry(node, struct vertex_entry, ln);

		free(entry);
	}

	list_free(list);
}

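/*
 * Frees the entries of a list of struct edge_entry nodes along with the
 * list itself.
 */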
static void free_edge_list(list_t *list)
{
	struct list_node *node;
	struct list_node *tmp;

	list_for_each_safe(list_head(list)->next, node, tmp) {
		struct edge_entry *entry =
			list_entry(node, struct edge_entry, ln);

		free(entry);
	}

	list_free(list);
}

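/**
 * Creates an empty bless_buffer_t.
 *
 * @param[out] buf the created bless_buffer_t
 *
 * @return the operation error code
 */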
int bless_buffer_new(bless_buffer_t **buf)
{
	if (buf == NULL)
		return_error(EINVAL);

	*buf = malloc(sizeof **buf);
	if (*buf == NULL)
		return_error(ENOMEM);

	int err = segcol_list_new(&(*buf)->segcol);
	if (err)
		goto fail_segcol;

	err = buffer_options_new(&(*buf)->options);
	if (err)
		goto fail_options;

	err = list_new(&(*buf)->undo_list, struct buffer_action_entry, ln);
	if (err)
		goto fail_undo;

	(*buf)->undo_list_size = 0;

	err = list_new(&(*buf)->redo_list, struct buffer_action_entry, ln);
	if (err)
		goto fail_redo;

	(*buf)->redo_list_size = 0;
	(*buf)->multi_action = NULL;
	(*buf)->multi_action_mode = 0;
	(*buf)->event_func = NULL;
	(*buf)->event_user_data = NULL;

	return 0;

fail_redo:
	list_free((*buf)->undo_list);
fail_undo:
	buffer_options_free((*buf)->options);
fail_options:
	segcol_free((*buf)->segcol);
fail_segcol:
	free(*buf);

	return_error(err);
}

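/**
 * Saves the contents of a bless_buffer_t to a file.
 *
 * @param buf the bless_buffer_t to save
 * @param fd the file descriptor of the file to save the buffer contents to
 * @param progress_func the bless_progress_func to call to report progress,
 *        or NULL to disable reporting (currently unused)
 *
 * @return the operation error code
 */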
int bless_buffer_save(bless_buffer_t *buf, int fd,
		bless_progress_func *progress_func)
{
	UNUSED_PARAM(progress_func);

	if (buf == NULL)
		return_error(EINVAL);

	off_t segcol_size;
	segcol_get_size(buf->segcol, &segcol_size);

	/*
	 * Check whether the file we are saving to can be resized (a regular
	 * file) or has a fixed size (e.g. a block device).
	 */
	int fd_resizable = 0;

	int err = is_fd_resizable(fd, &fd_resizable);
	if (err)
		return_error(err);

	/*
	 * Make sure there is enough space to hold the buffer contents:
	 * reserve the required disk space for resizable files, otherwise
	 * just check that the file is already large enough.
	 */
	if (fd_resizable == 1) {
		err = reserve_disk_space(fd, segcol_size);
		if (err)
			return_error(err);
	} else {
		off_t fd_size = lseek(fd, 0, SEEK_END);
		if (fd_size == -1)
			return_error(errno);
		if (fd_size < segcol_size)
			return_error(ENOSPC);
	}

	/* Create a data object for the file we are saving to */
	data_object_t *fd_obj;
	err = data_object_file_new(&fd_obj, fd);
	if (err)
		return_error(err);

	/* Handle the undo/redo history according to the undo_after_save option */
	if (!strcmp(buf->options->undo_after_save, "always")) {
		/*
		 * Make the actions in the undo/redo lists keep private copies
		 * of their data; abort the save if this is not possible.
		 */
		err = actions_make_private_copy(buf, fd_obj, 0);
		if (err)
			goto fail1;
	}
	else if (!strcmp(buf->options->undo_after_save, "best_effort")) {
		/*
		 * Try to make the actions keep private copies of their data,
		 * but don't abort the save if that fails; just drop the
		 * actions that could not be handled.
		 */
		actions_make_private_copy(buf, fd_obj, 1);
	}
	else if (strcmp(buf->options->undo_after_save, "never")) {
		/* Invalid undo_after_save value */
		err = EINVAL;
		goto fail1;
	}

	/*
	 * Create the overlap graph for the segments that come from the file
	 * we are saving to and remove any cycles, so that the segments can
	 * be written in a safe order.
	 */
	overlap_graph_t *g;
	err = create_overlap_graph(&g, buf->segcol, fd_obj);
	if (err)
		goto fail1;

	err = overlap_graph_remove_cycles(g);
	if (err)
		goto fail2;

	/* Get the edges that were removed from the graph... */
	list_t *removed_edges;
	err = overlap_graph_get_removed_edges(g, &removed_edges);
	if (err)
		goto fail2;

	/* ...and break them by moving the overlapping data out of the way */
	struct list_node *first_node =
		list_head(removed_edges)->next;
	struct list_node *node;

	list_for_each(first_node, node) {
		struct edge_entry *e = list_entry(node, struct edge_entry, ln);

		err = break_edge(buf->segcol, e, buf->options->tmp_dir);
		if (err)
			goto fail3;
	}

	free_edge_list(removed_edges);
	overlap_graph_free(g);

	/*
	 * Create the segcol that will replace the buffer's one after saving.
	 * It consists of a single segment spanning the whole saved file.
	 */
	segcol_t *segcol_tmp;
	err = segcol_list_new(&segcol_tmp);
	if (err)
		goto fail1;

	segment_t *fd_seg;
	err = segment_new(&fd_seg, fd_obj, 0, segcol_size, data_object_update_usage);
	if (err)
		goto fail4;

	err = segcol_append(segcol_tmp, fd_seg);
	if (err) {
		segment_free(fd_seg);
		goto fail4;
	}

	/*
	 * Recreate the overlap graph (breaking the edges may have altered the
	 * segcol) and get its vertices in topological order.
	 */
	err = create_overlap_graph(&g, buf->segcol, fd_obj);
	if (err)
		goto fail4;

	list_t *vertices;
	err = overlap_graph_get_vertices_topo(g, &vertices);
	if (err)
		goto fail5;

	/* Write the file's own segments in topological order */
	first_node =
		list_head(vertices)->next;

	list_for_each(first_node, node) {
		struct vertex_entry *v = list_entry(node, struct vertex_entry, ln);
		err = write_segment(fd, v->segment, v->mapping, v->self_loop_weight);
		if (err)
			goto fail6;
	}

	free_vertex_list(vertices);
	overlap_graph_free(g);

	/* Write the remaining segments (those not belonging to the file) */
	err = write_segcol_rest(fd, buf->segcol, fd_obj);
	if (err)
		goto fail4;

	/* Truncate the file to its final size (only for resizable files) */
	if (fd_resizable == 1) {
		err = ftruncate(fd, segcol_size);
		if (err == -1) {
			err = errno;
			goto fail4;
		}
	}

	/* Replace the buffer's segcol with the new one */
	segcol_free(buf->segcol);
	buf->segcol = segcol_tmp;

	if (!strcmp(buf->options->undo_after_save, "never")) {
		/* Clear the undo/redo history */
		action_list_clear(buf->undo_list);
		buf->undo_list_size = 0;

		action_list_clear(buf->redo_list);
		buf->redo_list_size = 0;
	}

	/* Report the save event to the user of the buffer */
	if (buf->event_func != NULL) {
		struct bless_buffer_event_info event_info;
		event_info.event_type = BLESS_BUFFER_EVENT_SAVE;
		event_info.action_type = BLESS_BUFFER_ACTION_NONE;
		event_info.range_start = -1;
		event_info.range_length = -1;
		event_info.save_fd = fd;
		(*buf->event_func)(buf, &event_info, buf->event_user_data);
	}

	return 0;

fail3:
	free_edge_list(removed_edges);
fail2:
	overlap_graph_free(g);
fail1:
	data_object_free(fd_obj);

	return_error(err);

fail6:
	free_vertex_list(vertices);
fail5:
	overlap_graph_free(g);
fail4:
	segcol_free(segcol_tmp);
	goto fail1;
}

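/**
 * Frees a bless_buffer_t.
 *
 * Freeing a bless_buffer_t also frees its segment collection, its options
 * and all the actions in its undo and redo lists.
 *
 * @param buf the bless_buffer_t to free
 *
 * @return the operation error code
 */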
int bless_buffer_free(bless_buffer_t *buf)
{
	if (buf == NULL)
		return_error(EINVAL);

	int err = segcol_free(buf->segcol);
	if (err)
		return_error(err);

	err = buffer_options_free(buf->options);
	if (err)
		return_error(err);

	/* Free the actions in the undo list */
	struct list_node *node;
	struct list_node *tmp;

	list_for_each_safe(list_head(buf->undo_list)->next, node, tmp) {
		struct buffer_action_entry *entry =
			list_entry(node, struct buffer_action_entry, ln);

		buffer_action_free(entry->action);
		free(entry);
	}

	list_free(buf->undo_list);

	/* Free the actions in the redo list */
	list_for_each_safe(list_head(buf->redo_list)->next, node, tmp) {
		struct buffer_action_entry *entry =
			list_entry(node, struct buffer_action_entry, ln);

		buffer_action_free(entry->action);
		free(entry);
	}

	list_free(buf->redo_list);

	free(buf);

	return 0;
}

#pragma GCC visibility pop