This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Rewrite userdir to be faster #4537

Merged 71 commits from hawkowl/speed-userdir into develop on Mar 7, 2019
Commits (71):

07b82a2 rewrite userdir (hawkowl, Jan 31, 2019)
f993fd6 emulated (hawkowl, Jan 31, 2019)
e8dd750 fixes (hawkowl, Jan 31, 2019)
cf079f3 fixes (hawkowl, Jan 31, 2019)
e818649 fixes (hawkowl, Jan 31, 2019)
35b33d1 fixes (hawkowl, Jan 31, 2019)
c123c71 fixes (hawkowl, Jan 31, 2019)
1b271f2 fixes (hawkowl, Jan 31, 2019)
ee98058 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Jan 31, 2019)
994243e fixes (hawkowl, Jan 31, 2019)
b845be4 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 1, 2019)
487bdc0 pep8 fixes (hawkowl, Feb 1, 2019)
e7e94d7 cleanup (hawkowl, Feb 1, 2019)
be4f84b remove unused code (hawkowl, Feb 1, 2019)
84a0240 fix (hawkowl, Feb 1, 2019)
060c5fb fix (hawkowl, Feb 1, 2019)
766b86d fix (hawkowl, Feb 1, 2019)
1b3bc5b fix (hawkowl, Feb 4, 2019)
cf03ec7 changelog (hawkowl, Feb 4, 2019)
c3b168f Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 4, 2019)
bd66799 fix, maybe (hawkowl, Feb 4, 2019)
2b3f166 fix, maybe (hawkowl, Feb 4, 2019)
c6a68b4 pep8 (hawkowl, Feb 5, 2019)
f21dcc7 fix flakiness (hawkowl, Feb 5, 2019)
b11de34 we dont need this here (hawkowl, Feb 5, 2019)
829fb4a Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 13, 2019)
2cd5abc black (hawkowl, Feb 13, 2019)
e224f9c Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 13, 2019)
ebe8bb5 some more test coverage (hawkowl, Feb 13, 2019)
aa13be6 some more test coverage (hawkowl, Feb 13, 2019)
3c4d418 some more test coverage (hawkowl, Feb 13, 2019)
ff78918 fix failure (hawkowl, Feb 13, 2019)
4716266 fix failure (hawkowl, Feb 13, 2019)
d3e216a select distinct (hawkowl, Feb 13, 2019)
75174bb fix remote (hawkowl, Feb 13, 2019)
f385f75 do a log (hawkowl, Feb 13, 2019)
a88c1d6 fix remote (hawkowl, Feb 13, 2019)
28239aa fix remote (hawkowl, Feb 13, 2019)
a689842 try and fix (hawkowl, Feb 13, 2019)
06a93b0 try and fix (hawkowl, Feb 13, 2019)
fab3b33 try and fix (hawkowl, Feb 13, 2019)
2574889 try and fix (hawkowl, Feb 13, 2019)
a50de76 try and fix (hawkowl, Feb 13, 2019)
960b3f0 try and fix (hawkowl, Feb 13, 2019)
d64fbc6 try and fix (hawkowl, Feb 13, 2019)
7c5b66e try and fix (hawkowl, Feb 13, 2019)
59334f5 try and fix (hawkowl, Feb 13, 2019)
42d0d3e try and fix (hawkowl, Feb 13, 2019)
8110905 try and fix (hawkowl, Feb 13, 2019)
d0d8a6e try and fix (hawkowl, Feb 13, 2019)
be857fd Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 25, 2019)
8e64d86 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 25, 2019)
e652138 fix (hawkowl, Feb 25, 2019)
43b71b3 cleanup (hawkowl, Feb 26, 2019)
6c46504 cleanup (hawkowl, Feb 26, 2019)
95ed0fa cleanup (hawkowl, Feb 26, 2019)
385a075 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 26, 2019)
1e7d2a4 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 27, 2019)
eed6db8 update comments (hawkowl, Feb 27, 2019)
2bf1d6a cleanup (hawkowl, Feb 28, 2019)
72e74f4 cleanup (hawkowl, Feb 28, 2019)
1335416 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Feb 28, 2019)
c628aaa cleanup (hawkowl, Feb 28, 2019)
843d287 cleanup (hawkowl, Feb 28, 2019)
f35b3cf cleanup (hawkowl, Mar 1, 2019)
8e963dd cleanuo (hawkowl, Mar 2, 2019)
2847e65 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Mar 2, 2019)
ea0cc09 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Mar 5, 2019)
765c458 Merge remote-tracking branch 'origin/develop' into hawkowl/speed-userdir (hawkowl, Mar 7, 2019)
a8c48b5 review cleanup (hawkowl, Mar 7, 2019)
3408e8d review cleanup (hawkowl, Mar 7, 2019)
1 change: 1 addition & 0 deletions changelog.d/4537.feature
@@ -0,0 +1 @@
The user directory has been rewritten to make it faster, with less chance of falling behind on a large server.
222 changes: 37 additions & 185 deletions synapse/handlers/user_directory.py
@@ -15,7 +15,7 @@

import logging

from six import iteritems
from six import iteritems, iterkeys

from twisted.internet import defer

@@ -63,10 +63,6 @@ def __init__(self, hs):
# When we start up for the first time we need to populate the user_directory.
# This is a set of user_id's we've inserted already
self.initially_handled_users = set()
self.initially_handled_users_in_public = set()

self.initially_handled_users_share = set()
self.initially_handled_users_share_private_room = set()

# The current position in the current_state_delta stream
self.pos = None
@@ -140,7 +136,6 @@ def handle_user_deactivated(self, user_id):
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
yield self.store.remove_from_user_dir(user_id)
yield self.store.remove_from_user_in_public_room(user_id)

@defer.inlineCallbacks
def _unsafe_process(self):
@@ -215,15 +210,13 @@ def _do_initial_spam(self):
logger.info("Processed all users")

self.initially_handled_users = None
self.initially_handled_users_in_public = None
self.initially_handled_users_share = None
self.initially_handled_users_share_private_room = None

yield self.store.update_user_directory_stream_pos(new_pos)

@defer.inlineCallbacks
def _handle_initial_room(self, room_id):
"""Called when we initially fill out user_directory one room at a time
"""
Called when we initially fill out user_directory one room at a time
"""
is_in_room = yield self.store.is_host_joined(room_id, self.server_name)
if not is_in_room:
@@ -238,23 +231,15 @@ def _handle_initial_room(self, room_id):
unhandled_users = user_ids - self.initially_handled_users

yield self.store.add_profiles_to_user_dir(
room_id,
{user_id: users_with_profile[user_id] for user_id in unhandled_users},
)

self.initially_handled_users |= unhandled_users

if is_public:
yield self.store.add_users_to_public_room(
room_id, user_ids=user_ids - self.initially_handled_users_in_public
)
self.initially_handled_users_in_public |= user_ids

# We now go and figure out the new users who share rooms with user entries
# We sleep aggressively here as otherwise it can starve resources.
# We also batch up inserts/updates, but try to avoid too many at once.
to_insert = set()
to_update = set()
count = 0
for user_id in user_ids:
if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
@@ -277,44 +262,18 @@ def _handle_initial_room(self, room_id):
count += 1

user_set = (user_id, other_user_id)

if user_set in self.initially_handled_users_share_private_room:
continue

if user_set in self.initially_handled_users_share:
if is_public:
continue
to_update.add(user_set)
else:
to_insert.add(user_set)

if is_public:
self.initially_handled_users_share.add(user_set)
else:
self.initially_handled_users_share_private_room.add(user_set)
to_insert.add(user_set)

if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
yield self.store.add_users_who_share_room(
room_id, not is_public, to_insert
)
to_insert.clear()

if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
yield self.store.update_users_who_share_room(
room_id, not is_public, to_update
)
to_update.clear()

if to_insert:
yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
to_insert.clear()

if to_update:
yield self.store.update_users_who_share_room(
room_id, not is_public, to_update
)
to_update.clear()

@defer.inlineCallbacks
def _handle_deltas(self, deltas):
"""Called with the state deltas to process
@@ -356,6 +315,7 @@ def _handle_deltas(self, deltas):
user_ids = yield self.store.get_users_in_dir_due_to_room(
room_id
)

for user_id in user_ids:
yield self._handle_remove_user(room_id, user_id)
return
@@ -436,14 +396,20 @@ def _handle_room_publicity_change(self, room_id, prev_event_id, event_id, typ):
# ignore the change
return

if change:
users_with_profile = yield self.state.get_current_user_in_room(room_id)
for user_id, profile in iteritems(users_with_profile):
yield self._handle_new_user(room_id, user_id, profile)
else:
users = yield self.store.get_users_in_public_due_to_room(room_id)
for user_id in users:
yield self._handle_remove_user(room_id, user_id)
users_with_profile = yield self.state.get_current_user_in_room(room_id)

# Remove every user from the sharing tables for that room.
for user_id in iterkeys(users_with_profile):
yield self.store.remove_user_who_share_room(user_id, room_id)

# Then, re-add them to the tables.
# NOTE: this is not the most efficient method, as handle_new_user sets
# up local_user -> other_user and other_user_whos_local -> local_user,
# which, when run over an entire room, will result in the same values
# being added multiple times. The batching upserts shouldn't make this
# too bad, though.
for user_id, profile in iteritems(users_with_profile):
yield self._handle_new_user(room_id, user_id, profile)
Member commented:

it feels like we're (still) repeating ourselves here.

Suppose there are 3 local users in the room: A, B, C.

First we call _handle_new_user with A. That will add entries: (A, A), (A, B), (A, C), (B, A), (C, A).
Then we call _handle_new_user with B. That will add entries: (B, B), (B, A), (B, C), (A, B), (C, B).
Finally we call _handle_new_user with C. That will add entries: (C, C), (C, A), (C, B), (A, C), (B, C).

So most of those entries are being added twice.

(happy if you want to say that's an existing problem, to be ignored for now, but since we're rewriting this it seems a reasonable time to consider it, or at least add a comment)
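For concreteness, a minimal Python sketch of the duplication described above (illustrative only: pairs_for_new_user is a hypothetical stand-in for the entries a _handle_new_user-style pass derives, not Synapse code):

```python
def pairs_for_new_user(new_user, members):
    """Entries a per-user pass derives when new_user is handled."""
    pairs = {(new_user, new_user)}  # the "adding yourself" entry
    for other in members:
        if other != new_user:
            pairs.add((new_user, other))
            pairs.add((other, new_user))
    return pairs

members = ["A", "B", "C"]
derived = []
for user in members:
    derived.extend(pairs_for_new_user(user, members))

# 15 entries are derived, but only 9 are distinct: every cross pair
# such as (A, B) is produced twice, once for A and once for B.
print(len(derived), len(set(derived)))  # 15 9
```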

Contributor Author (hawkowl) replied:

Without the adding yourself, that drops one of those, but yes, it does repeat that, but that's an existing problem.

Member replied:

> Without the adding yourself, that drops one of those

Ironically the "adding yourself" was the only entry which wasn't being redone.

> yes, it does repeat that, but that's an existing problem.

Fair enough. I'd still be happier if you could add a TODO or something so that I know I'm not going mad next time I look at this code.
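A hedged sketch of the deduplicated alternative the thread gestures at, iterating each unordered pair of members exactly once (again illustrative only, omitting self-pairs per the "adding yourself" note above; this is not what the PR implements):

```python
from itertools import combinations

def pairs_for_room(members):
    """Derive each sharing entry for a whole room exactly once."""
    pairs = set()
    for a, b in combinations(members, 2):
        pairs.add((a, b))  # local_user -> other_user
        pairs.add((b, a))  # and the reverse direction
    return pairs

print(sorted(pairs_for_room(["A", "B", "C"])))  # 6 entries, none repeated
```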


@defer.inlineCallbacks
def _handle_local_user(self, user_id):
@@ -457,7 +423,7 @@ def _handle_local_user(self, user_id):

row = yield self.store.get_user_in_directory(user_id)
if not row:
yield self.store.add_profiles_to_user_dir(None, {user_id: profile})
yield self.store.add_profiles_to_user_dir({user_id: profile})

@defer.inlineCallbacks
def _handle_new_user(self, room_id, user_id, profile):
@@ -471,55 +437,27 @@ def _handle_new_user(self, room_id, user_id, profile):

row = yield self.store.get_user_in_directory(user_id)
if not row:
yield self.store.add_profiles_to_user_dir(room_id, {user_id: profile})
yield self.store.add_profiles_to_user_dir({user_id: profile})

is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
room_id
)

if is_public:
row = yield self.store.get_user_in_public_room(user_id)
if not row:
yield self.store.add_users_to_public_room(room_id, [user_id])
else:
logger.debug("Not adding new user to public dir, %r", user_id)

# Now we update users who share rooms with users. We do this by getting
# all the current users in the room and seeing which aren't already
# marked in the database as sharing with `user_id`

# Now we update users who share rooms with users.
users_with_profile = yield self.state.get_current_user_in_room(room_id)

to_insert = set()
to_update = set()

is_appservice = self.store.get_if_app_services_interested_in_user(user_id)

# First, if they're our user then we need to update for every user
if self.is_mine_id(user_id) and not is_appservice:
# Returns a map of other_user_id -> shared_private. We only need
# to update mappings if for users that either don't share a room
# already (aren't in the map) or, if the room is private, those that
# only share a public room.
user_ids_shared = yield self.store.get_users_who_share_room_from_dir(
user_id
)
if self.is_mine_id(user_id):

for other_user_id in users_with_profile:
if user_id == other_user_id:
continue
is_appservice = self.store.get_if_app_services_interested_in_user(user_id)

# We don't care about appservice users.
if not is_appservice:
for other_user_id in users_with_profile:
if user_id == other_user_id:
continue

shared_is_private = user_ids_shared.get(other_user_id)
if shared_is_private is True:
# We've already marked in the database they share a private room
continue
elif shared_is_private is False:
# They already share a public room, so only update if this is
# a private room
if not is_public:
to_update.add((user_id, other_user_id))
elif shared_is_private is None:
# This is the first time they both share a room
to_insert.add((user_id, other_user_id))

# Next we need to update for every local user in the room
@@ -531,29 +469,11 @@ def _handle_new_user(self, room_id, user_id, profile):
other_user_id
)
if self.is_mine_id(other_user_id) and not is_appservice:
shared_is_private = yield self.store.get_if_users_share_a_room(
other_user_id, user_id
)
if shared_is_private is True:
# We've already marked in the database they share a private room
continue
elif shared_is_private is False:
# They already share a public room, so only update if this is
# a private room
if not is_public:
to_update.add((other_user_id, user_id))
elif shared_is_private is None:
# This is the first time they both share a room
to_insert.add((other_user_id, user_id))
to_insert.add((other_user_id, user_id))

if to_insert:
yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)

if to_update:
yield self.store.update_users_who_share_room(
room_id, not is_public, to_update
)

@defer.inlineCallbacks
def _handle_remove_user(self, room_id, user_id):
"""Called when we might need to remove user to directory
@@ -562,84 +482,16 @@ def _handle_remove_user(self, room_id, user_id):
room_id (str): room that the user left, or that stopped being public
user_id (str)
"""
logger.debug("Maybe removing user %r", user_id)

row = yield self.store.get_user_in_directory(user_id)
update_user_dir = row and row["room_id"] == room_id

row = yield self.store.get_user_in_public_room(user_id)
update_user_in_public = row and row["room_id"] == room_id

if update_user_in_public or update_user_dir:
# XXX: Make this faster?
rooms = yield self.store.get_rooms_for_user(user_id)
for j_room_id in rooms:
if not update_user_in_public and not update_user_dir:
break

is_in_room = yield self.store.is_host_joined(
j_room_id, self.server_name
)

if not is_in_room:
continue

if update_user_dir:
update_user_dir = False
yield self.store.update_user_in_user_dir(user_id, j_room_id)
logger.debug("Removing user %r", user_id)

is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
j_room_id
)
# Remove user from sharing tables
yield self.store.remove_user_who_share_room(user_id, room_id)

if update_user_in_public and is_public:
yield self.store.update_user_in_public_user_list(user_id, j_room_id)
update_user_in_public = False
# Are they still in a room with members? If not, remove them entirely.
users_in_room_with = yield self.store.get_users_who_share_room_from_dir(user_id)

if update_user_dir:
if len(users_in_room_with) == 0:
yield self.store.remove_from_user_dir(user_id)
elif update_user_in_public:
yield self.store.remove_from_user_in_public_room(user_id)

# Now handle users_who_share_rooms.

# Get a list of user tuples that were in the DB due to this room and
# users (this includes tuples where the other user matches `user_id`)
user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
user_id, room_id
)

for user_id, other_user_id in user_tuples:
# For each user tuple get a list of rooms that they still share,
# trying to find a private room, and update the entry in the DB
rooms = yield self.store.get_rooms_in_common_for_users(
user_id, other_user_id
)

# If they dont share a room anymore, remove the mapping
if not rooms:
yield self.store.remove_user_who_share_room(user_id, other_user_id)
continue

found_public_share = None
for j_room_id in rooms:
is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
j_room_id
)

if is_public:
found_public_share = j_room_id
else:
found_public_share = None
yield self.store.update_users_who_share_room(
room_id, not is_public, [(user_id, other_user_id)]
)
break

if found_public_share:
yield self.store.update_users_who_share_room(
room_id, not is_public, [(user_id, other_user_id)]
)

@defer.inlineCallbacks
def _handle_profile_change(self, user_id, room_id, prev_event_id, event_id):
47 changes: 47 additions & 0 deletions synapse/storage/schema/delta/53/user_share.sql
@@ -0,0 +1,47 @@
/* Copyright 2017 Vector Creations Ltd, 2019 New Vector Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

-- Old disused version of the tables below.
DROP TABLE IF EXISTS users_who_share_rooms;

-- This is no longer used because it's duplicated by the users_who_share_public_rooms table.
DROP TABLE IF EXISTS users_in_public_rooms;

-- Tables keeping track of what users share rooms. This is a map of local users
-- to local or remote users, per room. Remote users cannot be in the user_id
-- column, only the other_user_id column. There are two tables, one for public
-- rooms and one for private rooms.
CREATE TABLE IF NOT EXISTS users_who_share_public_rooms (
user_id TEXT NOT NULL,
other_user_id TEXT NOT NULL,
room_id TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS users_who_share_private_rooms (
user_id TEXT NOT NULL,
other_user_id TEXT NOT NULL,
room_id TEXT NOT NULL
);

CREATE UNIQUE INDEX users_who_share_public_rooms_u_idx ON users_who_share_public_rooms(user_id, other_user_id, room_id);
CREATE INDEX users_who_share_public_rooms_r_idx ON users_who_share_public_rooms(room_id);
CREATE INDEX users_who_share_public_rooms_o_idx ON users_who_share_public_rooms(other_user_id);

CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id);
CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id);
CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id);

-- Make sure that we populate the tables initially by resetting the stream ID
UPDATE user_directory_stream_pos SET stream_id = NULL;
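The unique indexes above are what make the handler's "batching upserts" comment work: re-inserting a (user_id, other_user_id, room_id) row that already exists can be a no-op. A small sqlite3 sketch of that idea (for illustration only: Synapse's real storage layer handles this itself, and on PostgreSQL the spelling would be INSERT ... ON CONFLICT DO NOTHING rather than SQLite's INSERT OR IGNORE):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE users_who_share_public_rooms "
    "(user_id TEXT NOT NULL, other_user_id TEXT NOT NULL, room_id TEXT NOT NULL)"
)
conn.execute(
    "CREATE UNIQUE INDEX users_who_share_public_rooms_u_idx "
    "ON users_who_share_public_rooms(user_id, other_user_id, room_id)"
)

batch = [("@a:hs", "@b:hs", "!room:hs"), ("@b:hs", "@a:hs", "!room:hs")]
for _ in range(2):  # the same pairs get derived twice, as discussed above
    conn.executemany(
        "INSERT OR IGNORE INTO users_who_share_public_rooms VALUES (?, ?, ?)",
        batch,
    )

(count,) = conn.execute(
    "SELECT count(*) FROM users_who_share_public_rooms"
).fetchone()
print(count)  # 2: the repeated inserts were ignored, not duplicated
```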