Change schedule primary key from consensus_timestamp to schedule_id (#2382)

* Change schedule primary key from consensus_timestamp to schedule_id
* Drop unused consensus_timestamp index
* Fix schedule REST API ordering by wrong field

Signed-off-by: Steven Sheehy <steven.sheehy@hedera.com>
steven-sheehy authored Aug 10, 2021
1 parent 10cbbb0 commit e9aa742
Showing 13 changed files with 88 additions and 77 deletions.
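
At a glance, the effect of this change on lookups: a schedule row is now addressed by its encoded schedule_id rather than by the consensus timestamp of the ScheduleCreate transaction. Below is a minimal sketch of the new lookup path; it is not part of the commit, it reuses the EntityId helper and repository types seen in the diffs, and the package locations and wrapper class are assumptions for illustration.

import java.util.Optional;

import com.hedera.mirror.importer.domain.EntityId;
import com.hedera.mirror.importer.domain.Schedule;
import com.hedera.mirror.importer.repository.ScheduleRepository;
import com.hederahashgraph.api.proto.java.ScheduleID;

class ScheduleLookupSketch {

    private final ScheduleRepository scheduleRepository;

    ScheduleLookupSketch(ScheduleRepository scheduleRepository) {
        this.scheduleRepository = scheduleRepository;
    }

    // Before this commit the lookup key was the create transaction's consensus timestamp;
    // after it, the key is the schedule entity's encoded id (EntityId.getId()).
    Optional<Schedule> find(ScheduleID scheduleId) {
        return scheduleRepository.findById(EntityId.of(scheduleId).getId());
    }
}
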
@@ -28,13 +28,11 @@
import lombok.ToString;

import com.hedera.mirror.importer.converter.AccountIdConverter;
import com.hedera.mirror.importer.converter.ScheduleIdConverter;

@Data
@Entity
@NoArgsConstructor
public class Schedule {
@Id
private Long consensusTimestamp;

@Convert(converter = AccountIdConverter.class)
@@ -45,9 +43,13 @@ public class Schedule {
@Convert(converter = AccountIdConverter.class)
private EntityId payerAccountId;

@Convert(converter = ScheduleIdConverter.class)
private EntityId scheduleId;
@Id
private Long scheduleId;

@ToString.Exclude
private byte[] transactionBody;

public void setScheduleId(EntityId scheduleId) {
this.scheduleId = scheduleId != null ? scheduleId.getId() : null;
}
}
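
The setter above is what keeps call sites working with EntityId while the column itself becomes a plain bigint primary key. A rough sketch of populating the entity under the new key (illustrative only; the toSchedule method, its parameters, and the proto package are not from this diff):

import com.hedera.mirror.importer.domain.EntityId;
import com.hedera.mirror.importer.domain.Schedule;
import com.hederahashgraph.api.proto.java.ScheduleID;

class ScheduleMappingSketch {

    // Illustrative only: consensus_timestamp is still persisted, but the @Id is now the
    // schedule's encoded entity id, normalized by the EntityId-accepting setter.
    Schedule toSchedule(ScheduleID scheduleId, long consensusTimestamp, EntityId payerAccountId) {
        Schedule schedule = new Schedule();
        schedule.setConsensusTimestamp(consensusTimestamp);
        schedule.setPayerAccountId(payerAccountId);
        schedule.setScheduleId(EntityId.of(scheduleId)); // stored internally as a Long
        return schedule;
    }
}
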
@@ -334,7 +334,7 @@ public void onNonFeeTransfer(NonFeeTransfer nonFeeTransfer) throws ImporterExcep
@Override
public void onSchedule(Schedule schedule) throws ImporterException {
// schedules could experience multiple updates in a single record file, handle updates in memory for this case
schedules.merge(schedule.getScheduleId().getId(), schedule, this::mergeSchedule);
schedules.merge(schedule.getScheduleId(), schedule, this::mergeSchedule);
}
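
The mergeSchedule helper referenced here is outside this diff; the sketch below is a hypothetical reduction consistent with the onScheduleMerge test further down, where the merged row keeps its create-time fields and picks up the later executed timestamp.

import com.hedera.mirror.importer.domain.Schedule;

class ScheduleMergeSketch {

    // Hypothetical merge (not the project's actual mergeSchedule): the earlier schedule
    // keeps its create-time fields and absorbs an executed timestamp, if any, from a
    // later update seen in the same record file.
    Schedule mergeSchedule(Schedule previous, Schedule current) {
        if (current.getExecutedTimestamp() != null) {
            previous.setExecutedTimestamp(current.getExecutedTimestamp());
        }
        return previous;
    }
}
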

@Override
@@ -26,12 +26,11 @@
import org.springframework.data.repository.CrudRepository;
import org.springframework.data.repository.query.Param;

import com.hedera.mirror.importer.domain.EntityId;
import com.hedera.mirror.importer.domain.Schedule;

public interface ScheduleRepository extends CrudRepository<Schedule, Long> {
@Modifying
@Transactional
@Query("update Schedule set executedTimestamp = :timestamp where scheduleId = :schedule")
void updateExecutedTimestamp(@Param("schedule") EntityId scheduleId, @Param("timestamp") long executedTimestamp);
void updateExecutedTimestamp(@Param("schedule") Long scheduleId, @Param("timestamp") long executedTimestamp);
}
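
A short usage sketch for the updated signature, assuming a caller that already holds the protobuf ScheduleID (the wrapper class, method name, and repository package are illustrative, not from the diff):

import com.hedera.mirror.importer.domain.EntityId;
import com.hedera.mirror.importer.repository.ScheduleRepository;
import com.hederahashgraph.api.proto.java.ScheduleID;

class ScheduleExecutionSketch {

    private final ScheduleRepository scheduleRepository;

    ScheduleExecutionSketch(ScheduleRepository scheduleRepository) {
        this.scheduleRepository = scheduleRepository;
    }

    // Callers now pass the encoded long id (the new primary key) instead of an EntityId.
    void markExecuted(ScheduleID scheduleId, long executedTimestamp) {
        scheduleRepository.updateExecutedTimestamp(EntityId.of(scheduleId).getId(), executedTimestamp);
    }
}
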
@@ -34,7 +34,7 @@ public class ScheduleUpsertQueryGenerator extends AbstractUpsertQueryGenerator<S
private final String temporaryTableName = getFinalTableName() + "_temp";
// scheduleId is used for completeness
private final List<String> v1ConflictIdColumns = List.of(Schedule_.SCHEDULE_ID);
private final List<String> v2ConflictIdColumns = List.of(Schedule_.CONSENSUS_TIMESTAMP, Schedule_.SCHEDULE_ID);
private final List<String> v2ConflictIdColumns = v1ConflictIdColumns;
private final Set<String> nullableColumns = Set.of(Schedule_.EXECUTED_TIMESTAMP);
private final Set<String> nonUpdatableColumns = Set.of(Schedule_.CONSENSUS_TIMESTAMP,
Schedule_.CREATOR_ACCOUNT_ID, Schedule_.PAYER_ACCOUNT_ID, Schedule_.SCHEDULE_ID,
@@ -0,0 +1,11 @@
-------------------
-- Change the primary key of schedule from consensus_timestamp to schedule_id
-------------------

drop index if exists schedule__schedule_id;

alter table if exists schedule
drop constraint if exists schedule_pkey;

alter table if exists schedule
add primary key (schedule_id);
@@ -8,11 +8,12 @@ CREATE TYPE token_supply_type AS ENUM ('INFINITE', 'FINITE');
CREATE TYPE token_type AS ENUM ('FUNGIBLE_COMMON', 'NON_FUNGIBLE_UNIQUE');

-- assessed_custom_fee
create table if not exists assessed_custom_fee (
amount bigint not null,
collector_account_id bigint not null,
consensus_timestamp bigint not null,
token_id bigint
create table if not exists assessed_custom_fee
(
amount bigint not null,
collector_account_id bigint not null,
consensus_timestamp bigint not null,
token_id bigint
);
comment on table assessed_custom_fee is 'Assessed custom fees for HTS transactions';

@@ -66,10 +67,10 @@ comment on table address_book_entry is 'Network address book node entries';
-- address_book_service_endpoint
create table if not exists address_book_service_endpoint
(
consensus_timestamp bigint not null,
ip_address_v4 varchar(15) not null,
node_id bigint not null,
port integer default -1 not null
consensus_timestamp bigint not null,
ip_address_v4 varchar(15) not null,
node_id bigint not null,
port integer default -1 not null
);
comment on table address_book_service_endpoint is 'Network address book node service endpoints';

@@ -115,17 +116,17 @@ create table if not exists entity
created_timestamp bigint,
deleted boolean,
expiration_timestamp bigint,
id bigint not null,
id bigint not null,
key bytea,
memo text default '' not null,
memo text default '' not null,
modified_timestamp bigint,
num bigint not null,
num bigint not null,
proxy_account_id bigint,
public_key character varying,
realm bigint not null,
shard bigint not null,
realm bigint not null,
shard bigint not null,
submit_key bytea,
type integer not null
type integer not null
);
comment on table entity is 'Network entity with state';

@@ -167,24 +168,24 @@ create table if not exists live_hash
-- nft
create table if not exists nft
(
account_id bigint,
created_timestamp bigint,
deleted boolean,
modified_timestamp bigint not null,
metadata bytea,
serial_number bigint not null,
token_id bigint not null
account_id bigint,
created_timestamp bigint,
deleted boolean,
modified_timestamp bigint not null,
metadata bytea,
serial_number bigint not null,
token_id bigint not null
);
comment on table nft is 'Non-Fungible Tokens (NFTs) minted on network';

-- nft_transfer
create table if not exists nft_transfer
(
consensus_timestamp bigint not null,
receiver_account_id bigint,
sender_account_id bigint,
serial_number bigint not null,
token_id bigint not null
consensus_timestamp bigint not null,
receiver_account_id bigint,
sender_account_id bigint,
serial_number bigint not null,
token_id bigint not null
);
comment on table nft_transfer is 'Crypto account nft transfers';

@@ -223,12 +224,12 @@ comment on table record_file is 'Network record file stream entries';
-- schedule
create table if not exists schedule
(
consensus_timestamp bigint primary key not null,
creator_account_id bigint not null,
executed_timestamp bigint null,
payer_account_id bigint not null,
schedule_id bigint not null,
transaction_body bytea not null
consensus_timestamp bigint not null,
creator_account_id bigint not null,
executed_timestamp bigint null,
payer_account_id bigint not null,
schedule_id bigint not null,
transaction_body bytea not null
);
comment on table schedule is 'Schedule entity entries';

@@ -35,6 +35,10 @@ select create_hypertable('crypto_transfer', 'consensus_timestamp', chunk_time_in
select create_hypertable('custom_fee', 'created_timestamp', chunk_time_interval => ${chunkTimeInterval},
create_default_indexes => false, if_not_exists => true);

-- entity
select create_hypertable('entity', 'id', chunk_time_interval => ${chunkIdInterval},
create_default_indexes => false, if_not_exists => true);

-- event_file
select create_hypertable('event_file', 'consensus_end', chunk_time_interval => ${chunkTimeInterval},
create_default_indexes => false, if_not_exists => true);
@@ -64,11 +68,7 @@ select create_hypertable('record_file', 'consensus_end', chunk_time_interval =>
create_default_indexes => false, if_not_exists => true);

-- schedule
select create_hypertable('schedule', 'consensus_timestamp', chunk_time_interval => ${chunkTimeInterval},
create_default_indexes => false, if_not_exists => true);

-- entity
select create_hypertable('entity', 'id', chunk_time_interval => ${chunkIdInterval},
select create_hypertable('schedule', 'schedule_id', chunk_time_interval => ${chunkTimeInterval},
create_default_indexes => false, if_not_exists => true);

-- t_entity_types hyper table creation skipped as it serves only as a reference table and rarely gets updated
@@ -98,9 +98,8 @@ create index if not exists record_file__prev_hash
on record_file (prev_hash);

-- schedule
create unique index if not exists schedule__schedule_id
on schedule (schedule_id desc, consensus_timestamp desc);

alter table if exists schedule
add primary key (schedule_id);
create index if not exists schedule__creator_account_id
on schedule (creator_account_id desc);

@@ -83,6 +83,14 @@ class EntityRecordItemListenerScheduleTest extends AbstractEntityRecordItemListe

private List<TransactionSignature> defaultSignatureList;

private static Stream<Arguments> provideScheduleCreatePayer() {
return Stream.of(
Arguments.of(null, PAYER, "no payer expect same as creator"),
Arguments.of(PAYER, PAYER, "payer set to creator"),
Arguments.of(PAYER2, PAYER2, "payer different than creator")
);
}

@BeforeEach
void before() {
entityProperties.getPersist().setSchedules(true);
@@ -242,14 +250,6 @@ void scheduleExecute(ResponseCodeEnum responseCodeEnum) {
assertTransactionInRepository(EXECUTE_TIMESTAMP, true, responseCodeEnum);
}

private static Stream<Arguments> provideScheduleCreatePayer() {
return Stream.of(
Arguments.of(null, PAYER, "no payer expect same as creator"),
Arguments.of(PAYER, PAYER, "payer set to creator"),
Arguments.of(PAYER2, PAYER2, "payer different than creator")
);
}

private Transaction scheduleCreateTransaction(AccountID payer) {
return buildTransaction(builder -> {
ScheduleCreateTransactionBody.Builder scheduleCreateBuilder = builder.getScheduleCreateBuilder();
@@ -347,10 +347,11 @@ private void insertScheduledTransaction(long signTimestamp, ScheduleID scheduleI

private void assertScheduleInRepository(ScheduleID scheduleID, long createdTimestamp, AccountID payer,
Long executedTimestamp) {
assertThat(scheduleRepository.findById(createdTimestamp)).get()
Long scheduleEntityId = EntityId.of(scheduleID).getId();
assertThat(scheduleRepository.findById(scheduleEntityId)).get()
.returns(createdTimestamp, from(Schedule::getConsensusTimestamp))
.returns(executedTimestamp, from(Schedule::getExecutedTimestamp))
.returns(EntityId.of(scheduleID), from(Schedule::getScheduleId))
.returns(scheduleEntityId, from(Schedule::getScheduleId))
.returns(EntityId.of(PAYER), from(Schedule::getCreatorAccountId))
.returns(EntityId.of(payer), from(Schedule::getPayerAccountId))
.returns(SCHEDULED_TRANSACTION_BODY.toByteArray(), from(Schedule::getTransactionBody));
@@ -116,14 +116,16 @@ class SqlEntityListenerTest extends IntegrationTest {
private final SqlEntityListener sqlEntityListener;
private final SqlProperties sqlProperties;
private final TransactionTemplate transactionTemplate;

private final String fileName = "2019-08-30T18_10_00.419072Z.rcd";
@Qualifier(CacheConfiguration.EXPIRE_AFTER_30M)
@Resource
private CacheManager cacheManager;

private final String fileName = "2019-08-30T18_10_00.419072Z.rcd";
private RecordFile recordFile;

private static Key keyFromString(String key) {
return Key.newBuilder().setEd25519(ByteString.copyFromUtf8(key)).build();
}

@BeforeEach
final void beforeEach() {
String newFileHash = UUID.randomUUID().toString();
@@ -733,8 +735,8 @@ void onSchedule() throws Exception {
// then
assertThat(recordFileRepository.findAll()).containsExactly(recordFile);
assertEquals(2, scheduleRepository.count());
assertExistsAndEquals(scheduleRepository, schedule1, 1L);
assertExistsAndEquals(scheduleRepository, schedule2, 2L);
assertExistsAndEquals(scheduleRepository, schedule1, entityId1.getId());
assertExistsAndEquals(scheduleRepository, schedule2, entityId2.getId());
}

@Test
@@ -791,7 +793,7 @@ void onScheduleMerge() throws Exception {
scheduleMerged.setExecutedTimestamp(5L);
assertThat(recordFileRepository.findAll()).containsExactly(recordFile);
assertEquals(1, scheduleRepository.count());
assertExistsAndEquals(scheduleRepository, scheduleMerged, 1L);
assertExistsAndEquals(scheduleRepository, scheduleMerged, entityId.getId());
}

private <T, ID> void assertExistsAndEquals(CrudRepository<T, ID> repository, T expected, ID id) throws Exception {
@@ -838,10 +840,6 @@ private Entity getEntity(long id, Long createdTimestamp, long modifiedTimestamp,
return getEntity(id, createdTimestamp, modifiedTimestamp, memo, null, null, null, null, null, null);
}

private static Key keyFromString(String key) {
return Key.newBuilder().setEd25519(ByteString.copyFromUtf8(key)).build();
}

private Entity getEntity(long id, Long createdTimestamp, long modifiedTimestamp, String memo,
Key adminKey, EntityId autoRenewAccountId, Long autoRenewPeriod,
Boolean deleted, Long expiryTimeNs, Key submitKey) {
@@ -37,7 +37,7 @@ class ScheduleRepositoryTest extends AbstractRepositoryTest {
@Test
void save() {
Schedule schedule = scheduleRepository.save(schedule(1));
assertThat(scheduleRepository.findById(schedule.getConsensusTimestamp()))
assertThat(scheduleRepository.findById(schedule.getScheduleId()))
.get().isEqualTo(schedule);
}

@@ -46,7 +46,7 @@ void updateExecutedTimestamp() {
Schedule schedule = scheduleRepository.save(schedule(1));
long newExecutedTimestamp = 1000L;
scheduleRepository.updateExecutedTimestamp(schedule.getScheduleId(), newExecutedTimestamp);
assertThat(scheduleRepository.findById(schedule.getConsensusTimestamp())).get()
assertThat(scheduleRepository.findById(schedule.getScheduleId())).get()
.returns(newExecutedTimestamp, from(Schedule::getExecutedTimestamp));
}

@@ -31,6 +31,6 @@ protected String getInsertQuery() {
"schedule_id, transaction_body) select schedule_temp.consensus_timestamp, schedule_temp" +
".creator_account_id, schedule_temp.executed_timestamp, schedule_temp.payer_account_id, schedule_temp" +
".schedule_id, schedule_temp.transaction_body from schedule_temp where schedule_temp" +
".consensus_timestamp is not null on conflict (consensus_timestamp, schedule_id) do nothing";
".consensus_timestamp is not null on conflict (schedule_id) do nothing";
}
}
10 changes: 5 additions & 5 deletions hedera-mirror-rest/schedules.js
@@ -29,12 +29,12 @@ const {NotFoundError} = require('./errors/notFoundError');
const scheduleSelectFields = [
'e.key',
's.consensus_timestamp',
'creator_account_id',
'executed_timestamp',
's.creator_account_id',
's.executed_timestamp',
'e.memo',
'payer_account_id',
's.payer_account_id',
's.schedule_id',
'transaction_body',
's.transaction_body',
`json_agg(
json_build_object(
'consensus_timestamp', ts.consensus_timestamp::text,
@@ -60,7 +60,7 @@ const entityIdJoinQuery = 'join entity e on e.id = s.schedule_id';
const groupByQuery = 'group by e.key, e.memo, s.consensus_timestamp, s.schedule_id';
const scheduleIdMatchQuery = 'where s.schedule_id = $1';
const scheduleLimitQuery = (paramCount) => `limit $${paramCount}`;
const scheduleOrderQuery = (order) => `order by s.consensus_timestamp ${order}`;
const scheduleOrderQuery = (order) => `order by s.schedule_id ${order}`;
const scheduleSelectQuery = ['select', scheduleSelectFields.join(',\n'), 'from schedule s'].join('\n');
const signatureJoinQuery = 'left join transaction_signature ts on ts.entity_id = s.schedule_id';
