Commit f2b02f06 authored by zork@chromium.org

Scope the WriteTransactions during model association so that we don't lock the UI thread

BUG=34206
TEST=none

Review URL: http://codereview.chromium.org/1361002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42708 0039d316-1c4b-4281-b951-d872f2087c98
parent 9db5f4e6
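The change relies on C++ block scoping of RAII lock objects: sync_api::WriteTransaction takes the sync share lock in its constructor and releases it in its destructor, so wrapping it in a bare { } block frees the lock as soon as the sync-model work is done, instead of holding it across the slow local-database write at the end of the function. A minimal sketch of the pattern, using a std::mutex stand-in rather than the real sync_api types (all names here are illustrative only):

#include <mutex>
#include <vector>

std::mutex share_lock;         // stand-in for the sync share lock
std::vector<int> new_entries;  // changes collected while the lock is held

void AssociateModels() {
  {
    // RAII guard, analogous to the scoped WriteTransaction: the lock is
    // acquired here and released at the closing brace of this block.
    std::lock_guard<std::mutex> trans(share_lock);
    new_entries.push_back(1);  // read and update the sync model
  }  // lock released; the UI thread may open its own transaction again

  // The slow database update now runs outside the lock, the way
  // web_database_->UpdateAutofillEntries() does in the real change.
}

Keeping the transaction's scope tight is what stops model association from blocking the UI thread for the whole association pass.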
......
@@ -43,93 +43,100 @@ bool AutofillModelAssociator::AssociateModels() {
     return false;
   }
-  sync_api::WriteTransaction trans(
-      sync_service_->backend()->GetUserShareHandle());
-  sync_api::ReadNode autofill_root(&trans);
-  if (!autofill_root.InitByTagLookup(kAutofillTag)) {
-    error_handler_->OnUnrecoverableError();
-    LOG(ERROR) << "Server did not create the top-level autofill node. We "
-               << "might be running against an out-of-date server.";
-    return false;
-  }
   std::set<AutofillKey> current_entries;
   std::vector<AutofillEntry> new_entries;
-  for (std::vector<AutofillEntry>::iterator ix = entries.begin();
-       ix != entries.end(); ++ix) {
-    if (id_map_.find(ix->key()) != id_map_.end()) {
-      // It seems that name/value pairs are not unique in the web database.
-      // As a result, we have to filter out duplicates here. This is probably
-      // a bug in the database.
-      continue;
-    }
+  {
+    sync_api::WriteTransaction trans(
+        sync_service_->backend()->GetUserShareHandle());
+    sync_api::ReadNode autofill_root(&trans);
+    if (!autofill_root.InitByTagLookup(kAutofillTag)) {
+      error_handler_->OnUnrecoverableError();
+      LOG(ERROR) << "Server did not create the top-level autofill node. We "
+                 << "might be running against an out-of-date server.";
+      return false;
+    }
+    for (std::vector<AutofillEntry>::iterator ix = entries.begin();
+         ix != entries.end(); ++ix) {
+      if (id_map_.find(ix->key()) != id_map_.end()) {
+        // It seems that name/value pairs are not unique in the web database.
+        // As a result, we have to filter out duplicates here. This is probably
+        // a bug in the database.
+        continue;
+      }
-    std::string tag = KeyToTag(ix->key().name(), ix->key().value());
-    sync_api::ReadNode node(&trans);
-    if (node.InitByClientTagLookup(syncable::AUTOFILL, tag)) {
-      const sync_pb::AutofillSpecifics& autofill(node.GetAutofillSpecifics());
-      DCHECK_EQ(tag, KeyToTag(UTF8ToUTF16(autofill.name()),
-                              UTF8ToUTF16(autofill.value())));
-      std::vector<base::Time> timestamps;
-      if (MergeTimestamps(autofill, ix->timestamps(), &timestamps)) {
-        AutofillEntry new_entry(ix->key(), timestamps);
-        new_entries.push_back(new_entry);
-        sync_api::WriteNode write_node(&trans);
-        if (!write_node.InitByClientTagLookup(syncable::AUTOFILL, tag)) {
-          LOG(ERROR) << "Failed to write autofill sync node.";
-          return false;
-        }
-        AutofillChangeProcessor::WriteAutofill(&write_node, new_entry);
-      }
-      Associate(&(ix->key()), node.GetId());
-    } else {
-      sync_api::WriteNode node(&trans);
-      if (!node.InitUniqueByCreation(syncable::AUTOFILL, autofill_root, tag)) {
-        LOG(ERROR) << "Failed to create autofill sync node.";
-        error_handler_->OnUnrecoverableError();
-        return false;
-      }
-      node.SetTitle(UTF16ToWide(ix->key().name() + ix->key().value()));
-      AutofillChangeProcessor::WriteAutofill(&node, *ix);
-      Associate(&(ix->key()), node.GetId());
-    }
-    current_entries.insert(ix->key());
-  }
+      std::string tag = KeyToTag(ix->key().name(), ix->key().value());
+      sync_api::ReadNode node(&trans);
+      if (node.InitByClientTagLookup(syncable::AUTOFILL, tag)) {
+        const sync_pb::AutofillSpecifics& autofill(node.GetAutofillSpecifics());
+        DCHECK_EQ(tag, KeyToTag(UTF8ToUTF16(autofill.name()),
+                                UTF8ToUTF16(autofill.value())));
+        std::vector<base::Time> timestamps;
+        if (MergeTimestamps(autofill, ix->timestamps(), &timestamps)) {
+          AutofillEntry new_entry(ix->key(), timestamps);
+          new_entries.push_back(new_entry);
+          sync_api::WriteNode write_node(&trans);
+          if (!write_node.InitByClientTagLookup(syncable::AUTOFILL, tag)) {
+            LOG(ERROR) << "Failed to write autofill sync node.";
+            return false;
+          }
+          AutofillChangeProcessor::WriteAutofill(&write_node, new_entry);
+        }
+        Associate(&(ix->key()), node.GetId());
+      } else {
+        sync_api::WriteNode node(&trans);
+        if (!node.InitUniqueByCreation(syncable::AUTOFILL,
+                                       autofill_root, tag)) {
+          LOG(ERROR) << "Failed to create autofill sync node.";
+          error_handler_->OnUnrecoverableError();
+          return false;
+        }
+        node.SetTitle(UTF16ToWide(ix->key().name() + ix->key().value()));
+        AutofillChangeProcessor::WriteAutofill(&node, *ix);
+        Associate(&(ix->key()), node.GetId());
+      }
+      current_entries.insert(ix->key());
+    }
-  int64 sync_child_id = autofill_root.GetFirstChildId();
-  while (sync_child_id != sync_api::kInvalidId) {
-    sync_api::ReadNode sync_child_node(&trans);
-    if (!sync_child_node.InitByIdLookup(sync_child_id)) {
-      LOG(ERROR) << "Failed to fetch child node.";
-      error_handler_->OnUnrecoverableError();
-      return false;
-    }
-    const sync_pb::AutofillSpecifics& autofill(
-        sync_child_node.GetAutofillSpecifics());
-    AutofillKey key(UTF8ToUTF16(autofill.name()),
-                    UTF8ToUTF16(autofill.value()));
-    if (current_entries.find(key) == current_entries.end()) {
-      std::vector<base::Time> timestamps;
-      int timestamps_count = autofill.usage_timestamp_size();
-      for (int c = 0; c < timestamps_count; ++c) {
-        timestamps.push_back(base::Time::FromInternalValue(
-            autofill.usage_timestamp(c)));
-      }
-      Associate(&key, sync_child_node.GetId());
-      new_entries.push_back(AutofillEntry(key, timestamps));
-    }
-    sync_child_id = sync_child_node.GetSuccessorId();
-  }
+    int64 sync_child_id = autofill_root.GetFirstChildId();
+    while (sync_child_id != sync_api::kInvalidId) {
+      sync_api::ReadNode sync_child_node(&trans);
+      if (!sync_child_node.InitByIdLookup(sync_child_id)) {
+        LOG(ERROR) << "Failed to fetch child node.";
+        error_handler_->OnUnrecoverableError();
+        return false;
+      }
+      const sync_pb::AutofillSpecifics& autofill(
+          sync_child_node.GetAutofillSpecifics());
+      AutofillKey key(UTF8ToUTF16(autofill.name()),
+                      UTF8ToUTF16(autofill.value()));
+      if (current_entries.find(key) == current_entries.end()) {
+        std::vector<base::Time> timestamps;
+        int timestamps_count = autofill.usage_timestamp_size();
+        for (int c = 0; c < timestamps_count; ++c) {
+          timestamps.push_back(base::Time::FromInternalValue(
+              autofill.usage_timestamp(c)));
+        }
+        Associate(&key, sync_child_node.GetId());
+        new_entries.push_back(AutofillEntry(key, timestamps));
+      }
+      sync_child_id = sync_child_node.GetSuccessorId();
+    }
+  }
+
+  // Since we're on the DB thread, we don't have to worry about updating
+  // the autofill database after closing the write transaction, since
+  // this is the only thread that writes to the database. We also don't have
+  // to worry about the sync model getting out of sync, because changes are
+  // propagated to the ChangeProcessor on this thread.
   if (new_entries.size() &&
       !web_database_->UpdateAutofillEntries(new_entries)) {
     LOG(ERROR) << "Failed to update autofill entries.";
......
@@ -62,6 +62,9 @@ void TypedUrlChangeProcessor::HandleURLsModified(
     history::URLsModifiedDetails* details) {
   sync_api::WriteTransaction trans(share_handle());
+
+  // TODO(sync): Get visits without holding the write transaction.
+  // See issue 34206
   sync_api::ReadNode typed_url_root(&trans);
   if (!typed_url_root.InitByTagLookup(kTypedUrlTag)) {
     error_handler()->OnUnrecoverableError();
......
@@ -42,94 +42,101 @@ bool TypedUrlModelAssociator::AssociateModels() {
     return false;
   }
-  sync_api::WriteTransaction trans(
-      sync_service_->backend()->GetUserShareHandle());
-  sync_api::ReadNode typed_url_root(&trans);
-  if (!typed_url_root.InitByTagLookup(kTypedUrlTag)) {
-    LOG(ERROR) << "Server did not create the top-level typed_url node. We "
-               << "might be running against an out-of-date server.";
-    return false;
-  }
-  std::set<std::string> current_urls;
   TypedUrlTitleVector titles;
   TypedUrlVector new_urls;
   TypedUrlUpdateVector updated_urls;
-  for (std::vector<history::URLRow>::iterator ix = typed_urls.begin();
-       ix != typed_urls.end(); ++ix) {
-    std::string tag = ix->url().spec();
-    sync_api::ReadNode node(&trans);
-    if (node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) {
-      const sync_pb::TypedUrlSpecifics& typed_url(node.GetTypedUrlSpecifics());
-      DCHECK_EQ(tag, typed_url.url());
-      history::URLRow new_url(ix->url());
+  {
+    sync_api::WriteTransaction trans(
+        sync_service_->backend()->GetUserShareHandle());
+    sync_api::ReadNode typed_url_root(&trans);
+    if (!typed_url_root.InitByTagLookup(kTypedUrlTag)) {
+      LOG(ERROR) << "Server did not create the top-level typed_url node. We "
+                 << "might be running against an out-of-date server.";
+      return false;
+    }
+    std::set<std::string> current_urls;
+    for (std::vector<history::URLRow>::iterator ix = typed_urls.begin();
+         ix != typed_urls.end(); ++ix) {
+      std::string tag = ix->url().spec();
+      sync_api::ReadNode node(&trans);
+      if (node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) {
+        const sync_pb::TypedUrlSpecifics& typed_url(
+            node.GetTypedUrlSpecifics());
+        DCHECK_EQ(tag, typed_url.url());
+        history::URLRow new_url(ix->url());
-      int difference = MergeUrls(typed_url, *ix, &new_url);
-      if (difference & DIFF_NODE_CHANGED) {
-        sync_api::WriteNode write_node(&trans);
-        if (!write_node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) {
-          LOG(ERROR) << "Failed to edit typed_url sync node.";
-          return false;
-        }
-        WriteToSyncNode(new_url, &write_node);
-      }
-      if (difference & DIFF_TITLE_CHANGED) {
-        titles.push_back(std::pair<GURL, std::wstring>(new_url.url(),
-                                                       new_url.title()));
-      }
-      if (difference & DIFF_ROW_CHANGED) {
-        updated_urls.push_back(
-            std::pair<history::URLID, history::URLRow>(ix->id(), new_url));
-      }
-      Associate(&tag, node.GetId());
-    } else {
-      sync_api::WriteNode node(&trans);
-      if (!node.InitUniqueByCreation(syncable::TYPED_URLS,
-                                     typed_url_root, tag)) {
-        LOG(ERROR) << "Failed to create typed_url sync node.";
-        return false;
-      }
-      node.SetTitle(UTF8ToWide(tag));
-      WriteToSyncNode(*ix, &node);
-      Associate(&tag, node.GetId());
-    }
-    current_urls.insert(tag);
-  }
+        int difference = MergeUrls(typed_url, *ix, &new_url);
+        if (difference & DIFF_NODE_CHANGED) {
+          sync_api::WriteNode write_node(&trans);
+          if (!write_node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) {
+            LOG(ERROR) << "Failed to edit typed_url sync node.";
+            return false;
+          }
+          WriteToSyncNode(new_url, &write_node);
+        }
+        if (difference & DIFF_TITLE_CHANGED) {
+          titles.push_back(std::pair<GURL, std::wstring>(new_url.url(),
+                                                         new_url.title()));
+        }
+        if (difference & DIFF_ROW_CHANGED) {
+          updated_urls.push_back(
+              std::pair<history::URLID, history::URLRow>(ix->id(), new_url));
+        }
+        Associate(&tag, node.GetId());
+      } else {
+        sync_api::WriteNode node(&trans);
+        if (!node.InitUniqueByCreation(syncable::TYPED_URLS,
+                                       typed_url_root, tag)) {
+          LOG(ERROR) << "Failed to create typed_url sync node.";
+          return false;
+        }
+        node.SetTitle(UTF8ToWide(tag));
+        WriteToSyncNode(*ix, &node);
+        Associate(&tag, node.GetId());
+      }
+      current_urls.insert(tag);
+    }
-  int64 sync_child_id = typed_url_root.GetFirstChildId();
-  while (sync_child_id != sync_api::kInvalidId) {
-    sync_api::ReadNode sync_child_node(&trans);
-    if (!sync_child_node.InitByIdLookup(sync_child_id)) {
-      LOG(ERROR) << "Failed to fetch child node.";
-      return false;
-    }
-    const sync_pb::TypedUrlSpecifics& typed_url(
-        sync_child_node.GetTypedUrlSpecifics());
-    if (current_urls.find(typed_url.url()) == current_urls.end()) {
-      history::URLRow new_url(GURL(typed_url.url()));
-      new_url.set_title(UTF8ToWide(typed_url.title()));
-      new_url.set_visit_count(typed_url.visit_count());
-      new_url.set_typed_count(typed_url.typed_count());
-      new_url.set_last_visit(
-          base::Time::FromInternalValue(typed_url.last_visit()));
-      new_url.set_hidden(typed_url.hidden());
-      Associate(&typed_url.url(), sync_child_node.GetId());
-      new_urls.push_back(new_url);
-    }
-    sync_child_id = sync_child_node.GetSuccessorId();
-  }
+    int64 sync_child_id = typed_url_root.GetFirstChildId();
+    while (sync_child_id != sync_api::kInvalidId) {
+      sync_api::ReadNode sync_child_node(&trans);
+      if (!sync_child_node.InitByIdLookup(sync_child_id)) {
+        LOG(ERROR) << "Failed to fetch child node.";
+        return false;
+      }
+      const sync_pb::TypedUrlSpecifics& typed_url(
+          sync_child_node.GetTypedUrlSpecifics());
+      if (current_urls.find(typed_url.url()) == current_urls.end()) {
+        history::URLRow new_url(GURL(typed_url.url()));
+        new_url.set_title(UTF8ToWide(typed_url.title()));
+        new_url.set_visit_count(typed_url.visit_count());
+        new_url.set_typed_count(typed_url.typed_count());
+        new_url.set_last_visit(
+            base::Time::FromInternalValue(typed_url.last_visit()));
+        new_url.set_hidden(typed_url.hidden());
+        Associate(&typed_url.url(), sync_child_node.GetId());
+        new_urls.push_back(new_url);
+      }
+      sync_child_id = sync_child_node.GetSuccessorId();
+    }
+  }
+
+  // Since we're on the history thread, we don't have to worry about updating
+  // the history database after closing the write transaction, since
+  // this is the only thread that writes to the database. We also don't have
+  // to worry about the sync model getting out of sync, because changes are
+  // propagated to the ChangeProcessor on this thread.
   WriteToHistoryBackend(&titles, &new_urls, &updated_urls);
   return true;
......
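Both associators also share the first-child/successor idiom for walking the server-side nodes under the transaction. A hedged sketch of that traversal, with a hypothetical in-memory directory standing in for the sync_api::ReadNode lookups (Node, directory, and VisitChildren are illustrative names, not the real API):

#include <cstdint>
#include <unordered_map>

const int64_t kInvalidId = 0;  // stand-in for sync_api::kInvalidId

struct Node {
  int64_t successor_id;  // kInvalidId marks the last child
  // ... payload such as the autofill or typed_url specifics ...
};

// Hypothetical directory; the real code does an InitByIdLookup() per id.
std::unordered_map<int64_t, Node> directory;

void VisitChildren(int64_t first_child_id) {
  // Mirrors the GetFirstChildId()/GetSuccessorId() loops in both hunks.
  for (int64_t id = first_child_id; id != kInvalidId;
       id = directory[id].successor_id) {
    // ... associate the node and queue a local database update ...
  }
}

Entries discovered this way are queued (new_entries, new_urls) and written to the local database only after the transaction scope closes, per the comments in the diffs above.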