Merge branch 'opencv:develop' into import_annotation_filename_test
umangapatel123 committed Mar 27, 2024
2 parents 2433667 + d0e1143 commit 522d48c
Showing 9 changed files with 135 additions and 20 deletions.
@@ -0,0 +1,4 @@
### Fixed

- Duplicated notifications for automatic annotation
(<https://github.com/opencv/cvat/pull/7595>)
5 changes: 5 additions & 0 deletions changelog.d/20240326_195957_tahamukhtar20.md
@@ -0,0 +1,5 @@
### Fixed

- A 90 deg-rotated video added with "Prefer Zip Chunks" disabled was warped;
fixed using the static cropImage function.
(<https://github.com/opencv/cvat/pull/7583>)
2 changes: 1 addition & 1 deletion cvat-core/src/lambda-manager.ts
@@ -106,7 +106,7 @@ class LambdaManager {

async listen(
requestID: string,
functionID: string,
functionID: string | number,
callback: (status: RQStatus, progress: number, message?: string) => void,
): Promise<void> {
const model = this.cachedList.find((_model) => _model.id === functionID);
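The only functional change here widens the functionID parameter of listen() from string to string | number, so numeric lambda function identifiers can also be matched against the cached model list. The sketch below illustrates the widened signature in isolation; RequestWatcher, FunctionModel, and the simplified status union are hypothetical stand-ins, not the real cvat-core types.

```typescript
// Hypothetical, simplified model of the widened listen() signature.
type RQStatus = 'queued' | 'started' | 'finished' | 'failed' | 'unknown';

interface FunctionModel {
    id: string | number;
}

class RequestWatcher {
    private cachedList: FunctionModel[] = [{ id: 42 }, { id: 'openvino-detector' }];

    // functionID accepts both string and numeric IDs, as in the diff above.
    listen(
        requestID: string,
        functionID: string | number,
        callback: (status: RQStatus, progress: number, message?: string) => void,
    ): void {
        const model = this.cachedList.find((_model) => _model.id === functionID);
        if (!model) {
            callback('unknown', 0, `Function ${functionID} is not in the cached list`);
            return;
        }
        callback('queued', 0, `Watching request ${requestID}`);
    }
}

// Both calls now type-check:
const watcher = new RequestWatcher();
watcher.listen('rq-1', 42, (status, progress) => console.log(status, progress));
watcher.listen('rq-2', 'openvino-detector', (status) => console.log(status));
```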
39 changes: 37 additions & 2 deletions cvat-data/src/ts/cvat-data.ts
@@ -200,6 +200,36 @@ export class FrameDecoder {
return null;
}

static cropImage(
imageBuffer: ArrayBuffer,
imageWidth: number,
imageHeight: number,
width: number,
height: number,
): ImageData {
if (width === imageWidth && height === imageHeight) {
return new ImageData(new Uint8ClampedArray(imageBuffer), width, height);
}
const source = new Uint32Array(imageBuffer);

const bufferSize = width * height * 4;
if (imageWidth === width) {
return new ImageData(new Uint8ClampedArray(imageBuffer, 0, bufferSize), width, height);
}

const buffer = new ArrayBuffer(bufferSize);
const rgbaInt32 = new Uint32Array(buffer);
const rgbaInt8Clamped = new Uint8ClampedArray(buffer);
let writeIdx = 0;
for (let row = 0; row < height; row++) {
const start = row * imageWidth;
rgbaInt32.set(source.subarray(start, start + width), writeIdx);
writeIdx += width;
}

return new ImageData(rgbaInt8Clamped, width, height);
}

async startDecode(): Promise<void> {
const blockToDecode = { ...this.requestedChunkToDecode };
const release = await this.mutex.acquire();
@@ -239,8 +269,13 @@ export class FrameDecoder {
const height = Math.round(this.renderHeight / scaleFactor);
const width = Math.round(this.renderWidth / scaleFactor);

const array = new Uint8ClampedArray(e.data.buf.slice(0, width * height * 4));
createImageBitmap(new ImageData(array, width)).then((bitmap) => {
createImageBitmap(FrameDecoder.cropImage(
e.data.buf,
e.data.width,
e.data.height,
width,
height,
)).then((bitmap) => {
decodedFrames[keptIndex] = bitmap;
this.chunkIsBeingDecoded.onDecode(keptIndex, decodedFrames[keptIndex]);

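The new static cropImage exists because the decoder can hand back a buffer whose row stride (imageWidth) is wider than the frame that should actually be rendered — the warped 90 deg-rotated video from the changelog entry above — so the frame is rebuilt row by row, dropping the extra pixels at the end of each row. Below is a minimal standalone sketch of the same row-copy technique; cropRGBA is an illustrative name, and the browser-only ImageData wrapper is omitted so the snippet also runs under Node. It is a sketch, not the CVAT implementation itself.

```typescript
// Standalone sketch of the row-by-row cropping idea used by cropImage above:
// each RGBA pixel is treated as one 32-bit word, so a whole row can be copied
// with a single typed-array set() call.
function cropRGBA(
    imageBuffer: ArrayBuffer,
    imageWidth: number, // stride of the decoded frame
    width: number,      // target width (<= imageWidth)
    height: number,     // target height
): Uint8ClampedArray {
    const source = new Uint32Array(imageBuffer);
    const target = new ArrayBuffer(width * height * 4);
    const targetPixels = new Uint32Array(target);

    for (let row = 0; row < height; row++) {
        const start = row * imageWidth; // beginning of this row in the wide frame
        targetPixels.set(source.subarray(start, start + width), row * width);
    }

    return new Uint8ClampedArray(target);
}

// A 3x2 all-white frame cropped to 2x2: the third column of each row is dropped.
const frame = new ArrayBuffer(3 * 2 * 4);
new Uint8ClampedArray(frame).fill(255);
const cropped = cropRGBA(frame, 3, 2, 2);
console.log(cropped.length === 2 * 2 * 4); // true
```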
2 changes: 1 addition & 1 deletion cvat-ui/package.json
@@ -1,6 +1,6 @@
{
"name": "cvat-ui",
"version": "1.63.3",
"version": "1.63.4",
"description": "CVAT single-page application",
"main": "src/index.tsx",
"scripts": {
30 changes: 22 additions & 8 deletions cvat-ui/src/actions/models-actions.ts
@@ -1,5 +1,5 @@
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022-2023 CVAT.ai Corporation
// Copyright (C) 2022-2024 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT

@@ -20,6 +20,7 @@ export enum ModelsActionTypes {
DELETE_MODEL = 'DELETE_MODEL',
DELETE_MODEL_SUCCESS = 'DELETE_MODEL_SUCCESS',
DELETE_MODEL_FAILED = 'DELETE_MODEL_FAILED',
GET_INFERENCES_SUCCESS = 'GET_INFERENCES_SUCCESS',
START_INFERENCE_FAILED = 'START_INFERENCE_FAILED',
GET_INFERENCE_STATUS_SUCCESS = 'GET_INFERENCE_STATUS_SUCCESS',
GET_INFERENCE_STATUS_FAILED = 'GET_INFERENCE_STATUS_FAILED',
@@ -45,6 +46,9 @@ export const modelsActions = {
error,
}),
fetchMetaFailed: (error: any) => createAction(ModelsActionTypes.FETCH_META_FAILED, { error }),
getInferencesSuccess: (requestedInferenceIDs: Record<string, boolean>) => (
createAction(ModelsActionTypes.GET_INFERENCES_SUCCESS, { requestedInferenceIDs })
),
getInferenceStatusSuccess: (taskID: number, activeInference: ActiveInference) => (
createAction(ModelsActionTypes.GET_INFERENCE_STATUS_SUCCESS, {
taskID,
@@ -64,9 +68,10 @@ export const modelsActions = {
error,
})
),
cancelInferenceSuccess: (taskID: number) => (
cancelInferenceSuccess: (taskID: number, activeInference: ActiveInference) => (
createAction(ModelsActionTypes.CANCEL_INFERENCE_SUCCESS, {
taskID,
activeInference,
})
),
cancelInferenceFailed: (taskID: number, error: any) => (
@@ -119,8 +124,9 @@ interface InferenceMeta {

function listen(inferenceMeta: InferenceMeta, dispatch: (action: ModelsActions) => void): void {
const { taskID, requestID, functionID } = inferenceMeta;

core.lambda
.listen(requestID, functionID, (status: RQStatus, progress: number, message: string) => {
.listen(requestID, functionID, (status: RQStatus, progress: number, message?: string) => {
if (status === RQStatus.FAILED || status === RQStatus.UNKNOWN) {
dispatch(
modelsActions.getInferenceStatusFailed(
@@ -129,7 +135,7 @@ function listen(inferenceMeta: InferenceMeta, dispatch: (action: ModelsActions)
status,
progress,
functionID,
error: message,
error: message as string,
id: requestID,
},
new Error(`Inference status for the task ${taskID} is ${status}. ${message}`),
@@ -144,7 +150,7 @@ function listen(inferenceMeta: InferenceMeta, dispatch: (action: ModelsActions)
status,
progress,
functionID,
error: message,
error: message as string,
id: requestID,
}),
);
@@ -163,22 +169,29 @@ function listen(inferenceMeta: InferenceMeta, dispatch: (action: ModelsActions)
}

export function getInferenceStatusAsync(): ThunkAction {
return async (dispatch): Promise<void> => {
return async (dispatch, getState): Promise<void> => {
const dispatchCallback = (action: ModelsActions): void => {
dispatch(action);
};

const { requestedInferenceIDs } = getState().models;

try {
const requests = await core.lambda.requests();
const newListenedIDs: Record<string, boolean> = {};
requests
.map((request: any): object => ({
taskID: +request.function.task,
requestID: request.id,
functionID: request.function.id,
}))
.forEach((inferenceMeta: InferenceMeta): void => {
listen(inferenceMeta, dispatchCallback);
if (!(inferenceMeta.requestID in requestedInferenceIDs)) {
listen(inferenceMeta, dispatchCallback);
newListenedIDs[inferenceMeta.requestID] = true;
}
});
dispatch(modelsActions.getInferencesSuccess(newListenedIDs));
} catch (error) {
dispatch(modelsActions.fetchMetaFailed(error));
}
@@ -201,6 +214,7 @@ export function startInferenceAsync(taskId: number, model: MLModel, body: object
},
dispatchCallback,
);
dispatch(modelsActions.getInferencesSuccess({ [requestID]: true }));
} catch (error) {
dispatch(modelsActions.startInferenceFailed(taskId, error));
}
@@ -212,7 +226,7 @@ export function cancelInferenceAsync(taskID: number): ThunkAction {
try {
const inference = getState().models.inferences[taskID];
await core.lambda.cancel(inference.id, inference.functionID);
dispatch(modelsActions.cancelInferenceSuccess(taskID));
dispatch(modelsActions.cancelInferenceSuccess(taskID, inference));
} catch (error) {
dispatch(modelsActions.cancelInferenceFailed(taskID, error));
}
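The duplicated-notifications fix works by tracking which inference requests already have a listener: getInferenceStatusAsync now reads requestedInferenceIDs from the store, calls listen() only for request IDs it has not seen before, and records the new IDs through getInferencesSuccess (startInferenceAsync does the same for the request it creates). Below is a small self-contained sketch of that deduplication pattern; listenOnce, subscribeToRequests, and the plain module-level map are illustrative simplifications of the Redux-based original.

```typescript
// Keep a map of request IDs that already have a listener and subscribe only
// to new ones — the core of the duplicated-notification fix above.
interface InferenceRequest {
    requestID: string;
    taskID: number;
}

const requestedInferenceIDs: Record<string, boolean> = {};

function listenOnce(request: InferenceRequest): void {
    console.log(`subscribed to ${request.requestID} (task ${request.taskID})`);
}

function subscribeToRequests(requests: InferenceRequest[]): Record<string, boolean> {
    const newListenedIDs: Record<string, boolean> = {};
    for (const request of requests) {
        // Skip requests that already have a listener.
        if (!(request.requestID in requestedInferenceIDs)) {
            listenOnce(request);
            newListenedIDs[request.requestID] = true;
            requestedInferenceIDs[request.requestID] = true;
        }
    }
    return newListenedIDs;
}

// Calling twice with the same request subscribes only once.
subscribeToRequests([{ requestID: 'rq-1', taskID: 7 }]);
subscribeToRequests([{ requestID: 'rq-1', taskID: 7 }]); // no new subscription
```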
3 changes: 3 additions & 0 deletions cvat-ui/src/reducers/index.ts
@@ -414,6 +414,9 @@ export interface ModelsState {
reid: MLModel[];
classifiers: MLModel[];
totalCount: number;
requestedInferenceIDs: {
[index: string]: boolean;
};
inferences: {
[index: number]: ActiveInference;
};
33 changes: 25 additions & 8 deletions cvat-ui/src/reducers/models-reducer.ts
@@ -1,8 +1,9 @@
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022-2023 CVAT.ai Corporation
// Copyright (C) 2022-2024 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT

import { omit } from 'lodash';
import { BoundariesActions, BoundariesActionTypes } from 'actions/boundaries-actions';
import { ModelsActionTypes, ModelsActions } from 'actions/models-actions';
import { AuthActionTypes, AuthActions } from 'actions/auth-actions';
@@ -20,6 +21,7 @@ const defaultState: ModelsState = {
classifiers: [],
modelRunnerIsVisible: false,
modelRunnerTask: null,
requestedInferenceIDs: {},
inferences: {},
totalCount: 0,
query: {
@@ -88,15 +90,28 @@ export default function (state = defaultState, action: ModelsActions | AuthActio
modelRunnerTask: null,
};
}
case ModelsActionTypes.GET_INFERENCES_SUCCESS: {
const { requestedInferenceIDs } = state;

return {
...state,
requestedInferenceIDs: {
...requestedInferenceIDs,
...action.payload.requestedInferenceIDs,
},
};
}
case ModelsActionTypes.GET_INFERENCE_STATUS_SUCCESS: {
const { inferences } = state;
const { inferences, requestedInferenceIDs } = state;

if (action.payload.activeInference.status === 'finished') {
const { taskID, activeInference } = action.payload;
const { id: inferenceID } = activeInference;

return {
...state,
inferences: Object.fromEntries(
Object.entries(inferences).filter(([key]): boolean => +key !== action.payload.taskID),
),
inferences: omit(inferences, taskID),
requestedInferenceIDs: omit(requestedInferenceIDs, inferenceID),
};
}

@@ -123,12 +138,14 @@ export default function (state = defaultState, action: ModelsActions | AuthActio
};
}
case ModelsActionTypes.CANCEL_INFERENCE_SUCCESS: {
const { inferences } = state;
delete inferences[action.payload.taskID];
const { inferences, requestedInferenceIDs } = state;
const { taskID, activeInference } = action.payload;
const { id: inferenceID } = activeInference;

return {
...state,
inferences: { ...inferences },
inferences: omit(inferences, taskID),
requestedInferenceIDs: omit(requestedInferenceIDs, inferenceID),
};
}
case ModelsActionTypes.GET_MODEL_PREVIEW: {
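In the reducer, finished and cancelled inferences are now removed immutably with lodash's omit, and the matching entry is dropped from requestedInferenceIDs as well; the previous CANCEL_INFERENCE_SUCCESS branch deleted the key from the existing inferences object in place. A small sketch of this immutable-removal pattern follows, with an illustrative state shape rather than the real ModelsState.

```typescript
import { omit } from 'lodash';

// omit() returns a new object without the given key, so the previous state
// object is never mutated.
interface DemoState {
    inferences: { [taskID: number]: { id: string; progress: number } };
    requestedInferenceIDs: { [inferenceID: string]: boolean };
}

function removeInference(state: DemoState, taskID: number, inferenceID: string) {
    return {
        ...state,
        inferences: omit(state.inferences, taskID),
        requestedInferenceIDs: omit(state.requestedInferenceIDs, inferenceID),
    };
}

const before: DemoState = {
    inferences: { 7: { id: 'rq-1', progress: 50 } },
    requestedInferenceIDs: { 'rq-1': true },
};
const after = removeInference(before, 7, 'rq-1');
console.log(Object.keys(after.inferences).length);  // 0
console.log(Object.keys(before.inferences).length); // 1 — original untouched
```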
37 changes: 37 additions & 0 deletions tests/cypress/e2e/features/masks_basics.js
@@ -158,6 +158,43 @@ context('Manipulations with masks', { scrollBehavior: false }, () => {
cy.drawMask(editingActions);
cy.finishMaskDrawing();
});

it('Underlying pixels are removed on enabling "Remove underlying pixels" tool', () => {
const mask1 = [{
method: 'brush',
coordinates: [[450, 250], [600, 400]],
}];
const mask2 = [{
method: 'brush',
coordinates: [[450, 250], [525, 325]],
}];

cy.startMaskDrawing();
cy.drawMask(mask1);
cy.get('.cvat-brush-tools-continue').click();

cy.drawMask(mask2);
cy.get('.cvat-brush-tools-underlying-pixels').click();
cy.get('.cvat-brush-tools-underlying-pixels').should('have.class', 'cvat-brush-tools-active-tool');
cy.finishMaskDrawing();

cy.get('#cvat-objects-sidebar-state-item-2').within(() => {
cy.get('.cvat-object-item-button-hidden').click();
});

cy.get('.cvat-canvas-container').then(([$canvas]) => {
cy.wrap($canvas).trigger('mousemove', { clientX: 450, clientY: 250 });
cy.get('#cvat_canvas_shape_1').should('not.have.class', 'cvat_canvas_shape_activated');

cy.wrap($canvas).trigger('mousemove', { clientX: 550, clientY: 350 });
cy.get('#cvat_canvas_shape_1').should('have.class', 'cvat_canvas_shape_activated');
});

cy.startMaskDrawing();
cy.get('.cvat-brush-tools-underlying-pixels').click();
cy.get('.cvat-brush-tools-underlying-pixels').should('not.have.class', 'cvat-brush-tools-active-tool');
cy.finishMaskDrawing();
});
});

describe('Tests to make sure that empty masks cannot be created', () => {