goto cleanup;
case 'e':
- ret = !repo_has_object_file(the_repository, &oid);
+ ret = !has_object(the_repository, &oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR);
goto cleanup;
case 'w':
continue;
if (ends_with(ref->name, "^{}"))
continue;
- if (!repo_has_object_file_with_flags(the_repository, &ref->old_oid,
- OBJECT_INFO_QUICK |
- OBJECT_INFO_SKIP_FETCH_OBJECT))
+ if (!has_object(the_repository, &ref->old_oid, 0))
continue;
refs_update_ref(get_main_ref_store(the_repository), msg,
ref->name, &ref->old_oid, NULL, 0,
struct string_list_item *remote_ref_item;
const struct ref *ref;
struct refname_hash_entry *item = NULL;
- const int quick_flags = OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT;
refname_hash_init(&existing_refs);
refname_hash_init(&remote_refs);
*/
if (ends_with(ref->name, "^{}")) {
if (item &&
- !repo_has_object_file_with_flags(the_repository, &ref->old_oid, quick_flags) &&
+ !has_object(the_repository, &ref->old_oid, 0) &&
!oidset_contains(&fetch_oids, &ref->old_oid) &&
- !repo_has_object_file_with_flags(the_repository, &item->oid, quick_flags) &&
+ !has_object(the_repository, &item->oid, 0) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
item = NULL;
* fetch.
*/
if (item &&
- !repo_has_object_file_with_flags(the_repository, &item->oid, quick_flags) &&
+ !has_object(the_repository, &item->oid, 0) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
* checked to see if it needs fetching.
*/
if (item &&
- !repo_has_object_file_with_flags(the_repository, &item->oid, quick_flags) &&
+ !has_object(the_repository, &item->oid, 0) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
struct commit *current = NULL, *updated;
int fast_forward = 0;
- if (!repo_has_object_file(the_repository, &ref->new_oid))
+ if (!has_object(the_repository, &ref->new_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
die(_("object %s not found"), oid_to_hex(&ref->new_oid));
if (oideq(&ref->old_oid, &ref->new_oid)) {
* we need all direct targets to exist.
*/
for (r = rm; r; r = r->next) {
- if (!repo_has_object_file_with_flags(the_repository, &r->old_oid,
- OBJECT_INFO_SKIP_FETCH_OBJECT))
+ if (!has_object(the_repository, &r->old_oid, HAS_OBJECT_RECHECK_PACKED))
return -1;
}
if (startup_info->have_repository) {
read_lock();
- collision_test_needed =
- repo_has_object_file_with_flags(the_repository, oid,
- OBJECT_INFO_QUICK);
+ collision_test_needed = has_object(the_repository, oid,
+ HAS_OBJECT_FETCH_PROMISOR);
read_unlock();
}
}
}
- if (!is_null_oid(new_oid) && !repo_has_object_file(the_repository, new_oid)) {
+ if (!is_null_oid(new_oid) &&
+ !has_object(the_repository, new_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
error("unpack should have generated %s, "
"but I can't find it!", oid_to_hex(new_oid));
ret = "bad pack";
info->status = PUSH_STATUS_UPTODATE;
else if (is_null_oid(&ref->old_oid))
info->status = PUSH_STATUS_CREATE;
- else if (repo_has_object_file(the_repository, &ref->old_oid) &&
+ else if (has_object(the_repository, &ref->old_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) &&
ref_newer(&ref->new_oid, &ref->old_oid))
info->status = PUSH_STATUS_FASTFORWARD;
else
const char *hex;
struct object_id peeled;
- if (!repo_has_object_file(the_repository, oid))
+ if (!has_object(the_repository, oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
die("git show-ref: bad ref %s (%s)", refname,
oid_to_hex(oid));
delta_data = get_data(delta_size);
if (!delta_data)
return;
- if (repo_has_object_file(the_repository, &base_oid))
+ if (has_object(the_repository, &base_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
; /* Ok we have this one */
else if (resolve_against_held(nr, &base_oid,
delta_data, delta_size))
static int already_written(struct bulk_checkin_packfile *state, struct object_id *oid)
{
/* The object may already exist in the repository */
- if (repo_has_object_file(the_repository, oid))
+ if (has_object(the_repository, oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return 1;
/* Might want to keep the list sorted */
int i;
if (!it)
return 0;
- if (it->entry_count < 0 || !repo_has_object_file(the_repository, &it->oid))
+ if (it->entry_count < 0 ||
+ !has_object(the_repository, &it->oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return 0;
for (i = 0; i < it->subtree_nr; i++) {
if (!cache_tree_fully_valid(it->down[i]->cache_tree))
}
}
- if (0 <= it->entry_count && repo_has_object_file(the_repository, &it->oid))
+ if (0 <= it->entry_count &&
+ has_object(the_repository, &it->oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return it->entry_count;
/*
ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
!must_check_existence(ce);
if (is_null_oid(oid) ||
- (!ce_missing_ok && !repo_has_object_file(the_repository, oid))) {
+ (!ce_missing_ok && !has_object(the_repository, oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))) {
strbuf_release(&buffer);
if (expected_missing)
return -1;
struct object_id oid;
hash_object_file(the_hash_algo, buffer.buf, buffer.len,
OBJ_TREE, &oid);
- if (repo_has_object_file_with_flags(the_repository, &oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
+ if (has_object(the_repository, &oid, HAS_OBJECT_RECHECK_PACKED))
oidcpy(&it->oid, &oid);
else
to_invalidate = 1;
if (!commit) {
struct object *o;
- if (!repo_has_object_file_with_flags(the_repository, &ref->old_oid,
- OBJECT_INFO_QUICK |
- OBJECT_INFO_SKIP_FETCH_OBJECT))
+ if (!has_object(the_repository, &ref->old_oid, 0))
continue;
o = parse_object(the_repository, &ref->old_oid);
if (!o || o->type != OBJ_COMMIT)
struct oid_array extra = OID_ARRAY_INIT;
struct object_id *oid = si->shallow->oid;
for (i = 0; i < si->shallow->nr; i++)
- if (repo_has_object_file(the_repository, &oid[i]))
+ if (has_object(the_repository, &oid[i],
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
oid_array_append(&extra, &oid[i]);
if (extra.nr) {
setup_alternate_shallow(&shallow_lock,
* Fetch a copy of the object if it doesn't exist locally - it
* may be required for updating server info later.
*/
- if (repo->can_update_info_refs && !repo_has_object_file(the_repository, &ref->old_oid)) {
+ if (repo->can_update_info_refs &&
+ !has_object(the_repository, &ref->old_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
obj = lookup_unknown_object(the_repository, &ref->old_oid);
fprintf(stderr, " fetch %s for %s\n",
oid_to_hex(&ref->old_oid), refname);
return error("Remote HEAD symrefs too deep");
if (is_null_oid(&head_oid))
return error("Unable to resolve remote HEAD");
- if (!repo_has_object_file(the_repository, &head_oid))
+ if (!has_object(the_repository, &head_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return error("Remote HEAD resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", oid_to_hex(&head_oid));
/* Remote branch must resolve to a known object */
if (is_null_oid(&remote_ref->old_oid))
return error("Unable to resolve remote branch %s",
remote_ref->name);
- if (!repo_has_object_file(the_repository, &remote_ref->old_oid))
+ if (!has_object(the_repository, &remote_ref->old_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return error("Remote branch %s resolves to object %s\nwhich does not exist locally, perhaps you need to fetch?", remote_ref->name, oid_to_hex(&remote_ref->old_oid));
/* Remote branch must be an ancestor of remote HEAD */
if (!force_all &&
!is_null_oid(&ref->old_oid) &&
!ref->force) {
- if (!repo_has_object_file(the_repository, &ref->old_oid) ||
+ if (!has_object(the_repository, &ref->old_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) ||
!ref_newer(&ref->peer_ref->new_oid,
&ref->old_oid)) {
/*
list_for_each_safe(pos, tmp, head) {
obj_req = list_entry(pos, struct object_request, node);
if (obj_req->state == WAITING) {
- if (repo_has_object_file(the_repository, &obj_req->oid))
+ if (has_object(the_repository, &obj_req->oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
obj_req->state = COMPLETE;
else {
start_object_request(obj_req);
if (!obj_req)
return error("Couldn't find request for %s in the queue", hex);
- if (repo_has_object_file(the_repository, &obj_req->oid)) {
+ if (has_object(the_repository, &obj_req->oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
if (obj_req->req)
abort_http_object_request(&obj_req->req);
abort_object_request(obj_req);
* of missing objects.
*/
if (ctx->revs->exclude_promisor_objects &&
- !repo_has_object_file(the_repository, &obj->oid) &&
+ !has_object(the_repository, &obj->oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) &&
is_promisor_object(ctx->revs->repo, &obj->oid))
return;
struct note_delete_list **l = (struct note_delete_list **) cb_data;
struct note_delete_list *n;
- if (repo_has_object_file(the_repository, object_oid))
+ if (has_object(the_repository, object_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return 0; /* nothing to do for this note */
/* failed to find object => prune this note */
char *co_buf;
hash_object_file(repo->hash_algo, buf, len, type, oid);
- if (repo_has_object_file_with_flags(repo, oid, OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT) ||
+ if (has_object(repo, oid, 0) ||
find_cached_object(repo->objects, oid))
return 0;
init_tree_desc(&desc, &tree->object.oid, tree->buffer, tree->size);
complete = 1;
while (tree_entry(&desc, &entry)) {
- if (!repo_has_object_file(the_repository, &entry.oid) ||
+ if (!has_object(the_repository, &entry.oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) ||
(S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) {
tree->object.flags |= INCOMPLETE;
complete = 0;
{
if (flags & REF_ISBROKEN)
return 0;
- if (!repo_has_object_file(repo, oid)) {
+ if (!has_object(repo, oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
error(_("%s does not point to a valid object!"), refname);
return 0;
}
if (!reject_reason && !ref->deletion && !is_null_oid(&ref->old_oid)) {
if (starts_with(ref->name, "refs/tags/"))
reject_reason = REF_STATUS_REJECT_ALREADY_EXISTS;
- else if (!repo_has_object_file_with_flags(the_repository, &ref->old_oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
+ else if (!has_object(the_repository, &ref->old_oid, HAS_OBJECT_RECHECK_PACKED))
reject_reason = REF_STATUS_REJECT_FETCH_FIRST;
else if (!lookup_commit_reference_gently(the_repository, &ref->old_oid, 1) ||
!lookup_commit_reference_gently(the_repository, &ref->new_oid, 1))
static void feed_object(struct repository *r,
const struct object_id *oid, FILE *fh, int negative)
{
- if (negative &&
- !repo_has_object_file_with_flags(r, oid,
- OBJECT_INFO_SKIP_FETCH_OBJECT |
- OBJECT_INFO_QUICK))
+ if (negative && !has_object(r, oid, 0))
return;
if (negative)
if (graft->nr_parent != -1)
return 0;
if (data->flags & QUICK) {
- if (!repo_has_object_file(the_repository, &graft->oid))
+ if (!has_object(the_repository, &graft->oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
return 0;
} else if (data->flags & SEEN_ONLY) {
struct commit *c = lookup_commit(the_repository, &graft->oid);
ALLOC_ARRAY(info->ours, sa->nr);
ALLOC_ARRAY(info->theirs, sa->nr);
for (size_t i = 0; i < sa->nr; i++) {
- if (repo_has_object_file(the_repository, sa->oid + i)) {
+ if (has_object(the_repository, sa->oid + i,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
struct commit_graft *graft;
graft = lookup_commit_graft(the_repository,
&sa->oid[i]);
for (i = dst = 0; i < info->nr_theirs; i++) {
if (i != dst)
info->theirs[dst] = info->theirs[i];
- if (repo_has_object_file(the_repository, oid + info->theirs[i]))
+ if (has_object(the_repository, oid + info->theirs[i],
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
dst++;
}
info->nr_theirs = dst;
{
if (get_oid_hex(hex, oid))
die("git upload-pack: expected SHA1 object, got '%s'", hex);
- if (!repo_has_object_file_with_flags(the_repository, oid,
- OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT))
+ if (!has_object(the_repository, oid, 0))
return -1;
return do_got_oid(data, oid);
}
return 0;
obj->flags |= SEEN;
- if (repo_has_object_file(the_repository, &obj->oid)) {
+ if (has_object(the_repository, &obj->oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
/* We already have it, so we should scan it now. */
obj->flags |= TO_SCAN;
}
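
For reference, a minimal sketch of the flag mapping applied throughout these hunks (an illustrative summary derived from the conversions above, not part of the patch itself; it assumes the has_object() flags declared in object-store.h):

	/* old: repo_has_object_file(r, oid) */
	has_object(r, oid, HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR);

	/* old: repo_has_object_file_with_flags(r, oid,
	 *          OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT) */
	has_object(r, oid, 0);

	/* old: repo_has_object_file_with_flags(r, oid, OBJECT_INFO_SKIP_FETCH_OBJECT) */
	has_object(r, oid, HAS_OBJECT_RECHECK_PACKED);

	/* old: repo_has_object_file_with_flags(r, oid, OBJECT_INFO_QUICK) */
	has_object(r, oid, HAS_OBJECT_FETCH_PROMISOR);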