From b0b6dadc00ec098a912b12689cec7a60140fd87e Mon Sep 17 00:00:00 2001
From: Tong Gao
Date: Mon, 22 Aug 2022 14:13:22 +0800
Subject: [PATCH] [Fix] mmcv.utils -> mmengine.utils (#1295)

* [Fix] mmcv.utils -> mmengine.utils

* mmcv -> mmengine
---
 mmocr/evaluation/metrics/recog_metric.py | 4 ++--
 mmocr/models/common/backbones/unet.py | 2 +-
 .../textdet/postprocessors/base_postprocessor.py | 6 +++---
 .../postprocessors/base_textrecog_postprocessor.py | 4 ++--
 mmocr/utils/__init__.py | 11 ++++-------
 mmocr/utils/collect_env.py | 4 ++--
 mmocr/utils/fileio.py | 4 ++--
 mmocr/utils/img_utils.py | 2 +-
 tools/analysis_tools/browse_dataset.py | 4 ++--
 .../common/curvedsyntext_converter.py | 11 +++++------
 .../dataset_converters/common/labelme_converter.py | 12 ++++++------
 tools/dataset_converters/kie/closeset_to_openset.py | 4 ++--
 tools/dataset_converters/textdet/bid_converter.py | 7 ++++---
 .../dataset_converters/textdet/ctw1500_converter.py | 10 ++++++----
 .../dataset_converters/textdet/detext_converter.py | 7 ++++---
 tools/dataset_converters/textdet/funsd_converter.py | 7 ++++---
 tools/dataset_converters/textdet/ic11_converter.py | 7 ++++---
 tools/dataset_converters/textdet/ic13_converter.py | 8 +++++---
 tools/dataset_converters/textdet/icdar_converter.py | 10 ++++++----
 tools/dataset_converters/textdet/ilst_converter.py | 7 ++++---
 tools/dataset_converters/textdet/imgur_converter.py | 3 ++-
 tools/dataset_converters/textdet/kaist_converter.py | 9 +++++----
 tools/dataset_converters/textdet/lv_converter.py | 8 +++++---
 tools/dataset_converters/textdet/mtwi_converter.py | 9 +++++----
 tools/dataset_converters/textdet/naf_converter.py | 7 ++++---
 tools/dataset_converters/textdet/rctw_converter.py | 9 +++++----
 tools/dataset_converters/textdet/rects_converter.py | 8 ++++----
 tools/dataset_converters/textdet/sroie_converter.py | 8 +++++---
 .../textdet/synthtext_converter.py | 3 ++-
 .../textdet/totaltext_converter.py | 7 ++++---
 .../dataset_converters/textdet/vintext_converter.py | 7 ++++---
 tools/dataset_converters/textrecog/bid_converter.py | 7 ++++---
 .../textrecog/cocotext_converter.py | 6 +++---
 .../textrecog/detext_converter.py | 11 ++++++-----
 .../dataset_converters/textrecog/funsd_converter.py | 7 ++++---
 .../textrecog/hiertext_converter.py | 7 ++++---
 .../dataset_converters/textrecog/ilst_converter.py | 7 ++++---
 .../dataset_converters/textrecog/imgur_converter.py | 3 ++-
 .../dataset_converters/textrecog/kaist_converter.py | 13 +++++++------
 .../dataset_converters/textrecog/lsvt_converter.py | 6 +++---
 .../dataset_converters/textrecog/mtwi_converter.py | 13 +++++++------
 tools/dataset_converters/textrecog/naf_converter.py | 11 ++++++-----
 .../textrecog/openvino_converter.py | 3 +--
 .../dataset_converters/textrecog/rctw_converter.py | 13 +++++++------
 .../dataset_converters/textrecog/rects_converter.py | 12 ++++++------
 .../dataset_converters/textrecog/sroie_converter.py | 8 +++++---
 .../textrecog/synthtext_converter.py | 6 ++++--
 .../textrecog/textocr_converter.py | 2 +-
 .../textrecog/totaltext_converter.py | 7 ++++---
 .../textrecog/vintext_converter.py | 7 ++++---
 50 files changed, 196 insertions(+), 162 deletions(-)

diff --git a/mmocr/evaluation/metrics/recog_metric.py b/mmocr/evaluation/metrics/recog_metric.py
index eb2c99021..5ed0e0fcc 100644
--- a/mmocr/evaluation/metrics/recog_metric.py
+++ b/mmocr/evaluation/metrics/recog_metric.py
@@ -3,7 +3,7 @@
 from difflib import SequenceMatcher
 from typing import Dict, Optional, Sequence, Union

-import mmcv
+import mmengine
 from mmengine.evaluator
import BaseMetric from rapidfuzz.distance import Levenshtein @@ -45,7 +45,7 @@ def __init__(self, self.valid_symbol = re.compile(valid_symbol) if isinstance(mode, str): mode = [mode] - assert mmcv.is_seq_of(mode, str) + assert mmengine.is_seq_of(mode, str) assert set(mode).issubset( {'exact', 'ignore_case', 'ignore_case_symbol'}) self.mode = set(mode) diff --git a/mmocr/models/common/backbones/unet.py b/mmocr/models/common/backbones/unet.py index 4a53f1eb1..3d504cedc 100644 --- a/mmocr/models/common/backbones/unet.py +++ b/mmocr/models/common/backbones/unet.py @@ -3,8 +3,8 @@ import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule, build_norm_layer -from mmcv.utils.parrots_wrapper import _BatchNorm from mmengine.model import BaseModule +from mmengine.utils.parrots_wrapper import _BatchNorm from mmocr.registry import MODELS diff --git a/mmocr/models/textdet/postprocessors/base_postprocessor.py b/mmocr/models/textdet/postprocessors/base_postprocessor.py index cd125a111..f5f734656 100644 --- a/mmocr/models/textdet/postprocessors/base_postprocessor.py +++ b/mmocr/models/textdet/postprocessors/base_postprocessor.py @@ -2,7 +2,7 @@ from functools import partial from typing import Dict, List, Optional, Sequence, Tuple, Union -import mmcv +import mmengine import numpy as np from torch import Tensor @@ -138,10 +138,10 @@ def split_results( is a tensor, or a list of N lists of tensors if ``pred_results`` is a list of tensors. """ - assert isinstance(pred_results, Tensor) or mmcv.is_seq_of( + assert isinstance(pred_results, Tensor) or mmengine.is_seq_of( pred_results, Tensor) - if mmcv.is_seq_of(pred_results, Tensor): + if mmengine.is_seq_of(pred_results, Tensor): for i in range(1, len(pred_results)): assert pred_results[0].shape[0] == pred_results[i].shape[0], \ 'The first dimension of all tensors should be the same' diff --git a/mmocr/models/textrecog/postprocessors/base_textrecog_postprocessor.py b/mmocr/models/textrecog/postprocessors/base_textrecog_postprocessor.py index 2f1d7b7be..0119bd5c9 100644 --- a/mmocr/models/textrecog/postprocessors/base_textrecog_postprocessor.py +++ b/mmocr/models/textrecog/postprocessors/base_textrecog_postprocessor.py @@ -2,7 +2,7 @@ import warnings from typing import Dict, Optional, Sequence, Tuple, Union -import mmcv +import mmengine import torch from mmengine.data import LabelData @@ -48,7 +48,7 @@ def __init__(self, 'end': self.dictionary.end_idx, 'unknown': self.dictionary.unknown_idx, } - if not mmcv.is_list_of(ignore_chars, str): + if not mmengine.is_list_of(ignore_chars, str): raise TypeError('ignore_chars must be list of str') ignore_indexes = list() for ignore_char in ignore_chars: diff --git a/mmocr/utils/__init__.py b/mmocr/utils/__init__.py index 5c3ee2a53..2f878107a 100644 --- a/mmocr/utils/__init__.py +++ b/mmocr/utils/__init__.py @@ -1,6 +1,4 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-from mmcv.utils import Registry, build_from_cfg - from .bbox_utils import (bbox2poly, bbox_center_distance, bbox_diag_distance, bezier2polygon, is_on_same_line, rescale_bboxes, stitch_boxes_into_lines) @@ -28,11 +26,10 @@ RecForwardResults, RecSampleList) __all__ = [ - 'Registry', 'build_from_cfg', 'collect_env', 'is_3dlist', 'is_type_list', - 'is_none_or_type', 'equal_len', 'is_2dlist', 'valid_boundary', - 'list_to_file', 'list_from_file', 'is_on_same_line', - 'stitch_boxes_into_lines', 'StringStripper', 'bezier2polygon', - 'sort_points', 'dump_ocr_data', 'recog_anno_to_imginfo', + 'collect_env', 'is_3dlist', 'is_type_list', 'is_none_or_type', 'equal_len', + 'is_2dlist', 'valid_boundary', 'list_to_file', 'list_from_file', + 'is_on_same_line', 'stitch_boxes_into_lines', 'StringStripper', + 'bezier2polygon', 'sort_points', 'dump_ocr_data', 'recog_anno_to_imginfo', 'rescale_polygons', 'rescale_polygon', 'rescale_bboxes', 'bbox2poly', 'crop_polygon', 'is_poly_inside_rect', 'poly2bbox', 'poly_intersection', 'poly_iou', 'poly_make_valid', 'poly_union', 'poly2shapely', diff --git a/mmocr/utils/collect_env.py b/mmocr/utils/collect_env.py index a8cb3c40c..07d7d3577 100644 --- a/mmocr/utils/collect_env.py +++ b/mmocr/utils/collect_env.py @@ -1,6 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. -from mmcv.utils import collect_env as collect_base_env -from mmcv.utils import get_git_hash +from mmengine.utils import collect_env as collect_base_env +from mmengine.utils import get_git_hash import mmocr diff --git a/mmocr/utils/fileio.py b/mmocr/utils/fileio.py index bbc5c9538..d5651d844 100644 --- a/mmocr/utils/fileio.py +++ b/mmocr/utils/fileio.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. import os -import mmcv +import mmengine def list_to_file(filename, lines): @@ -11,7 +11,7 @@ def list_to_file(filename, lines): filename (str): The output filename. It will be created/overwritten. lines (list(str)): Data to be written. """ - mmcv.mkdir_or_exist(os.path.dirname(filename)) + mmengine.mkdir_or_exist(os.path.dirname(filename)) with open(filename, 'w', encoding='utf-8') as fw: for line in lines: fw.write(f'{line}\n') diff --git a/mmocr/utils/img_utils.py b/mmocr/utils/img_utils.py index 102acf3d5..c96a05d25 100644 --- a/mmocr/utils/img_utils.py +++ b/mmocr/utils/img_utils.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import cv2 import numpy as np -from mmcv.utils import is_seq_of +from mmengine.utils import is_seq_of from shapely.geometry import LineString, Point from .bbox_utils import bbox_jitter diff --git a/tools/analysis_tools/browse_dataset.py b/tools/analysis_tools/browse_dataset.py index 320c0b6cf..16d6c3080 100644 --- a/tools/analysis_tools/browse_dataset.py +++ b/tools/analysis_tools/browse_dataset.py @@ -2,7 +2,7 @@ import argparse import os.path as osp -import mmcv +import mmengine from mmengine import Config, DictAction from mmocr.registry import DATASETS, VISUALIZERS @@ -50,7 +50,7 @@ def main(): dataset = DATASETS.build(cfg.train_dataloader.dataset) visualizer = VISUALIZERS.build(cfg.visualizer) - progress_bar = mmcv.ProgressBar(len(dataset)) + progress_bar = mmengine.ProgressBar(len(dataset)) for item in dataset: img = item['inputs'].permute(1, 2, 0).numpy() data_sample = item['data_sample'].numpy() diff --git a/tools/dataset_converters/common/curvedsyntext_converter.py b/tools/dataset_converters/common/curvedsyntext_converter.py index f125787c3..7a136c71f 100644 --- a/tools/dataset_converters/common/curvedsyntext_converter.py +++ b/tools/dataset_converters/common/curvedsyntext_converter.py @@ -3,7 +3,6 @@ import os.path as osp from functools import partial -import mmcv import mmengine import numpy as np @@ -84,14 +83,14 @@ def convert_annotations(data, start_img_id=start_img_id, start_ann_id=start_ann_id) if nproc > 1: - data['annotations'] = mmcv.track_parallel_progress( + data['annotations'] = mmengine.track_parallel_progress( modify_annotation_with_params, data['annotations'], nproc=nproc) - data['images'] = mmcv.track_parallel_progress( + data['images'] = mmengine.track_parallel_progress( modify_image_info_with_params, data['images'], nproc=nproc) else: - data['annotations'] = mmcv.track_progress( + data['annotations'] = mmengine.track_progress( modify_annotation_with_params, data['annotations']) - data['images'] = mmcv.track_progress( + data['images'] = mmengine.track_progress( modify_image_info_with_params, data['images'], ) @@ -103,7 +102,7 @@ def main(): args = parse_args() root_path = args.root_path out_dir = args.out_dir if args.out_dir else root_path - mmcv.mkdir_or_exist(out_dir) + mmengine.mkdir_or_exist(out_dir) anns = mmengine.load(osp.join(root_path, 'train1.json')) data1 = convert_annotations(anns, 'syntext_word_eng', args.num_sample, diff --git a/tools/dataset_converters/common/labelme_converter.py b/tools/dataset_converters/common/labelme_converter.py index 58ae17b0e..a03077bc2 100644 --- a/tools/dataset_converters/common/labelme_converter.py +++ b/tools/dataset_converters/common/labelme_converter.py @@ -33,7 +33,7 @@ def parse_labelme_json(json_file, src_img = mmcv.imread(img_full_path) img_basename = osp.splitext(img_file)[0] sub_dir = osp.join(out_dir, 'crops', img_basename) - mmcv.mkdir_or_exist(sub_dir) + mmengine.mkdir_or_exist(sub_dir) det_line_json_list = [] recog_crop_line_str_list = [] @@ -143,7 +143,7 @@ def process(json_dir, nproc=1, recog_format='jsonl', warp=False): - mmcv.mkdir_or_exist(out_dir) + mmengine.mkdir_or_exist(out_dir) json_file_list = glob.glob(osp.join(json_dir, '*.json')) @@ -156,10 +156,10 @@ def process(json_dir, warp_flag=warp) if nproc <= 1: - total_results = mmcv.track_progress(parse_labelme_json_func, - json_file_list) + total_results = mmengine.track_progress(parse_labelme_json_func, + json_file_list) else: - total_results = mmcv.track_parallel_progress( + total_results = mmengine.track_parallel_progress( parse_labelme_json_func, 
json_file_list, keep_order=True, @@ -174,7 +174,7 @@ def process(json_dir, total_recog_crop_line_str.extend(res[1]) total_recog_warp_line_str.extend(res[2]) - mmcv.mkdir_or_exist(out_dir) + mmengine.mkdir_or_exist(out_dir) det_out_file = osp.join(out_dir, 'instances_training.txt') list_to_file(det_out_file, total_det_line_json_list) diff --git a/tools/dataset_converters/kie/closeset_to_openset.py b/tools/dataset_converters/kie/closeset_to_openset.py index 4c2480bfa..2057e9797 100644 --- a/tools/dataset_converters/kie/closeset_to_openset.py +++ b/tools/dataset_converters/kie/closeset_to_openset.py @@ -3,7 +3,7 @@ import json from functools import partial -import mmcv +import mmengine from mmocr.utils import list_from_file, list_to_file @@ -89,7 +89,7 @@ def process(closeset_file, openset_file, merge_bg_others=False, n_proc=10): convert_func = partial(convert, merge_bg_others=merge_bg_others) - openset_lines = mmcv.track_parallel_progress( + openset_lines = mmengine.track_parallel_progress( convert_func, closeset_lines, nproc=n_proc) list_to_file(openset_file, openset_lines) diff --git a/tools/dataset_converters/textdet/bid_converter.py b/tools/dataset_converters/textdet/bid_converter.py index 40ef530d3..a16a3439e 100644 --- a/tools/dataset_converters/textdet/bid_converter.py +++ b/tools/dataset_converters/textdet/bid_converter.py @@ -4,6 +4,7 @@ import os.path as osp import mmcv +import mmengine from mmocr.utils import dump_ocr_data @@ -50,10 +51,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -164,7 +165,7 @@ def parse_args(): def main(): args = parse_args() root_path = args.root_path - with mmcv.Timer(print_tmpl='It takes {}s to convert BID annotation'): + with mmengine.Timer(print_tmpl='It takes {}s to convert BID annotation'): files = collect_files( osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations')) image_infos = collect_annotations(files, nproc=args.nproc) diff --git a/tools/dataset_converters/textdet/ctw1500_converter.py b/tools/dataset_converters/textdet/ctw1500_converter.py index f50cb58cd..8a40ada0f 100644 --- a/tools/dataset_converters/textdet/ctw1500_converter.py +++ b/tools/dataset_converters/textdet/ctw1500_converter.py @@ -6,6 +6,7 @@ from functools import partial import mmcv +import mmengine import numpy as np from shapely.geometry import Polygon @@ -72,10 +73,10 @@ def collect_annotations(files, split, nproc=1): load_img_info_with_split = partial(load_img_info, split=split) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info_with_split, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info_with_split, files) + images = mmengine.track_progress(load_img_info_with_split, files) return images @@ -208,7 +209,7 @@ def main(): args = parse_args() root_path = args.root_path out_dir = args.out_dir if args.out_dir else root_path - mmcv.mkdir_or_exist(out_dir) + mmengine.mkdir_or_exist(out_dir) img_dir = osp.join(root_path, 'imgs') gt_dir = osp.join(root_path, 'annotations') @@ -220,7 +221,8 @@ def main(): for split, json_name in set_name.items(): print(f'Converting {split} into {json_name}') - with mmcv.Timer(print_tmpl='It takes {}s to convert icdar annotation'): + with mmengine.Timer( + 
print_tmpl='It takes {}s to convert icdar annotation'): files = collect_files( osp.join(img_dir, split), osp.join(gt_dir, split), split) image_infos = collect_annotations(files, split, nproc=args.nproc) diff --git a/tools/dataset_converters/textdet/detext_converter.py b/tools/dataset_converters/textdet/detext_converter.py index 09a00a6cb..d99378e44 100644 --- a/tools/dataset_converters/textdet/detext_converter.py +++ b/tools/dataset_converters/textdet/detext_converter.py @@ -4,6 +4,7 @@ import os.path as osp import mmcv +import mmengine import numpy as np from mmocr.utils import dump_ocr_data @@ -50,10 +51,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -146,7 +147,7 @@ def main(): for split in ['training', 'val']: print(f'Processing {split} set...') - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert DeText annotation'): files = collect_files( osp.join(root_path, 'imgs', split), diff --git a/tools/dataset_converters/textdet/funsd_converter.py b/tools/dataset_converters/textdet/funsd_converter.py index dbb91bc7e..7be887d26 100644 --- a/tools/dataset_converters/textdet/funsd_converter.py +++ b/tools/dataset_converters/textdet/funsd_converter.py @@ -51,10 +51,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -144,7 +144,8 @@ def main(): for split in ['training', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert FUNSD annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert FUNSD annotation'): files = collect_files( osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations', split)) diff --git a/tools/dataset_converters/textdet/ic11_converter.py b/tools/dataset_converters/textdet/ic11_converter.py index 605779241..1a5683f4a 100644 --- a/tools/dataset_converters/textdet/ic11_converter.py +++ b/tools/dataset_converters/textdet/ic11_converter.py @@ -4,6 +4,7 @@ import os.path as osp import mmcv +import mmengine from PIL import Image from mmocr.utils import dump_ocr_data @@ -68,10 +69,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -158,7 +159,7 @@ def main(): for split in ['training', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert annotation'): + with mmengine.Timer(print_tmpl='It takes {}s to convert annotation'): files = collect_files( osp.join(root_path, 'imgs', split), osp.join(root_path, 'annotations', split)) diff --git a/tools/dataset_converters/textdet/ic13_converter.py b/tools/dataset_converters/textdet/ic13_converter.py index ca583cd0e..bdf891c71 100644 --- a/tools/dataset_converters/textdet/ic13_converter.py +++ b/tools/dataset_converters/textdet/ic13_converter.py @@ 
-4,6 +4,7 @@ import os.path as osp import mmcv +import mmengine from mmocr.utils import dump_ocr_data @@ -51,10 +52,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -151,7 +152,8 @@ def main(): for split in ['training', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert IC13 annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert IC13 annotation'): files = collect_files( osp.join(root_path, 'imgs', split), osp.join(root_path, 'annotations', split), split) diff --git a/tools/dataset_converters/textdet/icdar_converter.py b/tools/dataset_converters/textdet/icdar_converter.py index 10dc1041d..453aae7d3 100644 --- a/tools/dataset_converters/textdet/icdar_converter.py +++ b/tools/dataset_converters/textdet/icdar_converter.py @@ -5,6 +5,7 @@ from functools import partial import mmcv +import mmengine import numpy as np from shapely.geometry import Polygon @@ -62,10 +63,10 @@ def collect_annotations(files, dataset, nproc=1): load_img_info_with_dataset = partial(load_img_info, dataset=dataset) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info_with_dataset, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info_with_dataset, files) + images = mmengine.track_progress(load_img_info_with_dataset, files) return images @@ -159,7 +160,7 @@ def main(): args = parse_args() icdar_path = args.icdar_path out_dir = args.out_dir if args.out_dir else icdar_path - mmcv.mkdir_or_exist(out_dir) + mmengine.mkdir_or_exist(out_dir) img_dir = osp.join(icdar_path, 'imgs') gt_dir = osp.join(icdar_path, 'annotations') @@ -171,7 +172,8 @@ def main(): for split, json_name in set_name.items(): print(f'Converting {split} into {json_name}') - with mmcv.Timer(print_tmpl='It takes {}s to convert icdar annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert icdar annotation'): files = collect_files( osp.join(img_dir, split), osp.join(gt_dir, split)) image_infos = collect_annotations( diff --git a/tools/dataset_converters/textdet/ilst_converter.py b/tools/dataset_converters/textdet/ilst_converter.py index 9ce255f07..078d7e855 100644 --- a/tools/dataset_converters/textdet/ilst_converter.py +++ b/tools/dataset_converters/textdet/ilst_converter.py @@ -5,6 +5,7 @@ import xml.etree.ElementTree as ET import mmcv +import mmengine from mmocr.utils import dump_ocr_data @@ -52,10 +53,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -185,7 +186,7 @@ def parse_args(): def main(): args = parse_args() root_path = args.root_path - with mmcv.Timer(print_tmpl='It takes {}s to convert ILST annotation'): + with mmengine.Timer(print_tmpl='It takes {}s to convert ILST annotation'): files = collect_files( osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations')) image_infos = collect_annotations(files, nproc=args.nproc) diff --git a/tools/dataset_converters/textdet/imgur_converter.py 
b/tools/dataset_converters/textdet/imgur_converter.py index 272ec35da..f6c19cd33 100644 --- a/tools/dataset_converters/textdet/imgur_converter.py +++ b/tools/dataset_converters/textdet/imgur_converter.py @@ -139,7 +139,8 @@ def main(): for split in ['train', 'val', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert IMGUR annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert IMGUR annotation'): anno_infos = collect_imgur_info( root_path, f'imgur5k_annotations_{split}.json') dump_ocr_data(anno_infos, diff --git a/tools/dataset_converters/textdet/kaist_converter.py b/tools/dataset_converters/textdet/kaist_converter.py index b7389bf3d..3f95804d1 100644 --- a/tools/dataset_converters/textdet/kaist_converter.py +++ b/tools/dataset_converters/textdet/kaist_converter.py @@ -6,6 +6,7 @@ import xml.etree.ElementTree as ET import mmcv +import mmengine from mmocr.utils import dump_ocr_data @@ -66,10 +67,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -181,7 +182,7 @@ def main(): # Train set trn_infos = collect_annotations(trn_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert KAIST Training annotation'): dump_ocr_data(trn_infos, osp.join(root_path, 'instances_training.json'), @@ -190,7 +191,7 @@ def main(): # Val set if len(val_files) > 0: val_infos = collect_annotations(val_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert KAIST Val annotation'): dump_ocr_data(val_infos, osp.join(root_path, 'instances_val.json'), 'textdet') diff --git a/tools/dataset_converters/textdet/lv_converter.py b/tools/dataset_converters/textdet/lv_converter.py index 9cc7e9fe5..6efcc1431 100644 --- a/tools/dataset_converters/textdet/lv_converter.py +++ b/tools/dataset_converters/textdet/lv_converter.py @@ -5,6 +5,7 @@ import xml.etree.ElementTree as ET import mmcv +import mmengine from mmocr.utils import dump_ocr_data @@ -59,10 +60,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -168,7 +169,8 @@ def main(): for split in ['train', 'val', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert LV annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert LV annotation'): files = collect_files(osp.join(root_path, 'imgs', split)) image_infos = collect_annotations(files, nproc=args.nproc) dump_ocr_data(image_infos, diff --git a/tools/dataset_converters/textdet/mtwi_converter.py b/tools/dataset_converters/textdet/mtwi_converter.py index 267e6becc..1c9fde3bf 100644 --- a/tools/dataset_converters/textdet/mtwi_converter.py +++ b/tools/dataset_converters/textdet/mtwi_converter.py @@ -6,6 +6,7 @@ import cv2 import mmcv +import mmengine from PIL import Image from mmocr.utils import dump_ocr_data @@ -79,10 +80,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = 
mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -183,7 +184,7 @@ def main(): # Train set trn_infos = collect_annotations(trn_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert MTWI Training annotation'): dump_ocr_data(trn_infos, osp.join(root_path, 'instances_training.json'), @@ -192,7 +193,7 @@ def main(): # Val set if len(val_files) > 0: val_infos = collect_annotations(val_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert MTWI Val annotation'): dump_ocr_data(val_infos, osp.join(root_path, 'instances_val.json'), 'textdet') diff --git a/tools/dataset_converters/textdet/naf_converter.py b/tools/dataset_converters/textdet/naf_converter.py index 928176398..2e43c8fba 100644 --- a/tools/dataset_converters/textdet/naf_converter.py +++ b/tools/dataset_converters/textdet/naf_converter.py @@ -61,10 +61,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -182,7 +182,8 @@ def main(): split_info['val'] = split_info.pop('valid') for split in ['training', 'val', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert NAF annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert NAF annotation'): files = collect_files( osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations'), split_info[split]) diff --git a/tools/dataset_converters/textdet/rctw_converter.py b/tools/dataset_converters/textdet/rctw_converter.py index be948f2a0..cc46dd859 100644 --- a/tools/dataset_converters/textdet/rctw_converter.py +++ b/tools/dataset_converters/textdet/rctw_converter.py @@ -5,6 +5,7 @@ import os.path as osp import mmcv +import mmengine from mmocr.utils import dump_ocr_data @@ -63,10 +64,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -172,7 +173,7 @@ def main(): osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations'), ratio) # Train set - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert RCTW Training annotation'): trn_infos = collect_annotations(trn_files, nproc=args.nproc) dump_ocr_data(trn_infos, osp.join(root_path, @@ -181,7 +182,7 @@ def main(): # Val set if len(val_files) > 0: - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert RCTW Val annotation'): val_infos = collect_annotations(val_files, nproc=args.nproc) dump_ocr_data(val_infos, osp.join(root_path, 'instances_val.json'), diff --git a/tools/dataset_converters/textdet/rects_converter.py b/tools/dataset_converters/textdet/rects_converter.py index 16971a776..75f3b7346 100644 --- a/tools/dataset_converters/textdet/rects_converter.py +++ b/tools/dataset_converters/textdet/rects_converter.py @@ -64,10 +64,10 @@ def collect_annotations(files, nproc=1): assert 
isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -188,7 +188,7 @@ def main(): # Train set trn_infos = collect_annotations(trn_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert ReCTS Training annotation'): dump_ocr_data(trn_infos, osp.join(root_path, 'instances_training.json'), @@ -197,7 +197,7 @@ def main(): # Val set if len(val_files) > 0: val_infos = collect_annotations(val_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert ReCTS Val annotation'): dump_ocr_data(val_infos, osp.join(root_path, 'instances_val.json'), 'textdet') diff --git a/tools/dataset_converters/textdet/sroie_converter.py b/tools/dataset_converters/textdet/sroie_converter.py index 90733623e..7ee0725e2 100644 --- a/tools/dataset_converters/textdet/sroie_converter.py +++ b/tools/dataset_converters/textdet/sroie_converter.py @@ -4,6 +4,7 @@ import os.path as osp import mmcv +import mmengine import numpy as np from mmocr.utils import dump_ocr_data @@ -56,10 +57,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -152,7 +153,8 @@ def main(): for split in ['training', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert SROIE annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert SROIE annotation'): files = collect_files( osp.join(root_path, 'imgs', split), osp.join(root_path, 'annotations', split)) diff --git a/tools/dataset_converters/textdet/synthtext_converter.py b/tools/dataset_converters/textdet/synthtext_converter.py index 59a3472a3..811b1cc0e 100644 --- a/tools/dataset_converters/textdet/synthtext_converter.py +++ b/tools/dataset_converters/textdet/synthtext_converter.py @@ -6,6 +6,7 @@ import lmdb import mmcv +import mmengine import numpy as np from scipy.io import loadmat from shapely.geometry import Polygon @@ -169,7 +170,7 @@ def main(): args = parse_args() synthtext_path = args.synthtext_path out_dir = args.out_dir if args.out_dir else synthtext_path - mmcv.mkdir_or_exist(out_dir) + mmengine.mkdir_or_exist(out_dir) gt_name = osp.join(synthtext_path, 'gt.mat') lmdb_name = 'synthtext.lmdb' diff --git a/tools/dataset_converters/textdet/totaltext_converter.py b/tools/dataset_converters/textdet/totaltext_converter.py index b8c3bb494..75e4cacc9 100644 --- a/tools/dataset_converters/textdet/totaltext_converter.py +++ b/tools/dataset_converters/textdet/totaltext_converter.py @@ -7,6 +7,7 @@ import cv2 import mmcv +import mmengine import numpy as np import scipy.io as scio import yaml @@ -64,10 +65,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -396,7 +397,7 @@ def main(): for split, json_name in set_name.items(): print(f'Converting 
{split} into {json_name}') - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert totaltext annotation'): files = collect_files( osp.join(img_dir, split), osp.join(gt_dir, split)) diff --git a/tools/dataset_converters/textdet/vintext_converter.py b/tools/dataset_converters/textdet/vintext_converter.py index 994b7a1e8..fb7a364d9 100644 --- a/tools/dataset_converters/textdet/vintext_converter.py +++ b/tools/dataset_converters/textdet/vintext_converter.py @@ -4,6 +4,7 @@ import os.path as osp import mmcv +import mmengine from mmocr.utils import dump_ocr_data @@ -50,10 +51,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -155,7 +156,7 @@ def main(): root_path = args.root_path for split in ['training', 'test', 'unseen_test']: print(f'Processing {split} set...') - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert VinText annotation'): files = collect_files( osp.join(root_path, 'imgs', split), diff --git a/tools/dataset_converters/textrecog/bid_converter.py b/tools/dataset_converters/textrecog/bid_converter.py index 564ee0d00..42c376855 100644 --- a/tools/dataset_converters/textrecog/bid_converter.py +++ b/tools/dataset_converters/textrecog/bid_converter.py @@ -5,6 +5,7 @@ import os.path as osp import mmcv +import mmengine from mmocr.utils.fileio import list_to_file from mmocr.utils.img_utils import crop_img @@ -52,10 +53,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -244,7 +245,7 @@ def parse_args(): def main(): args = parse_args() root_path = args.root_path - with mmcv.Timer(print_tmpl='It takes {}s to convert BID annotation'): + with mmengine.Timer(print_tmpl='It takes {}s to convert BID annotation'): files = collect_files( osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations')) image_infos = collect_annotations(files, nproc=args.nproc) diff --git a/tools/dataset_converters/textrecog/cocotext_converter.py b/tools/dataset_converters/textrecog/cocotext_converter.py index 335bbea83..c4676b111 100644 --- a/tools/dataset_converters/textrecog/cocotext_converter.py +++ b/tools/dataset_converters/textrecog/cocotext_converter.py @@ -138,8 +138,8 @@ def convert_cocotext(root_path, dst_image_root = osp.join(root_path, 'crops', split) ignore_image_root = osp.join(root_path, 'ignores', split) src_image_root = osp.join(root_path, 'imgs') - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) process_img_with_path = partial( process_img, @@ -155,7 +155,7 @@ def convert_cocotext(root_path, ann_ids = annotation['imgToAnns'][str(img_info['id'])] anns = [annotation['anns'][str(ann_id)] for ann_id in ann_ids] tasks.append((img_idx + img_start_idx, img_info, anns)) - labels_list = mmcv.track_parallel_progress( + labels_list = mmengine.track_parallel_progress( process_img_with_path, tasks, keep_order=True, nproc=nproc) final_labels = [] for label_list in 
labels_list: diff --git a/tools/dataset_converters/textrecog/detext_converter.py b/tools/dataset_converters/textrecog/detext_converter.py index dd5cb1932..83a69a050 100644 --- a/tools/dataset_converters/textrecog/detext_converter.py +++ b/tools/dataset_converters/textrecog/detext_converter.py @@ -5,6 +5,7 @@ import os.path as osp import mmcv +import mmengine import numpy as np from mmocr.utils.fileio import list_to_file @@ -52,10 +53,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -141,8 +142,8 @@ def generate_ann(root_path, split, image_infos, preserve_vertical, format): dst_label_file = osp.join(root_path, f'train_label.{format}') elif split == 'val': dst_label_file = osp.join(root_path, f'val_label.{format}') - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) lines = [] for image_info in image_infos: @@ -211,7 +212,7 @@ def main(): for split in ['training', 'val']: print(f'Processing {split} set...') - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert DeText annotation'): files = collect_files( osp.join(root_path, 'imgs', split), diff --git a/tools/dataset_converters/textrecog/funsd_converter.py b/tools/dataset_converters/textrecog/funsd_converter.py index bc8c357fe..5498beebc 100644 --- a/tools/dataset_converters/textrecog/funsd_converter.py +++ b/tools/dataset_converters/textrecog/funsd_converter.py @@ -53,10 +53,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -212,7 +212,8 @@ def main(): for split in ['training', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert FUNSD annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert FUNSD annotation'): files = collect_files( osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations', split)) diff --git a/tools/dataset_converters/textrecog/hiertext_converter.py b/tools/dataset_converters/textrecog/hiertext_converter.py index c9e4c5241..1ed924d46 100644 --- a/tools/dataset_converters/textrecog/hiertext_converter.py +++ b/tools/dataset_converters/textrecog/hiertext_converter.py @@ -6,6 +6,7 @@ from functools import partial import mmcv +import mmengine import numpy as np from shapely.geometry import Polygon @@ -180,8 +181,8 @@ def convert_hiertext( dst_image_root = osp.join(root_path, 'crops', split) ignore_image_root = osp.join(root_path, 'ignores', split) src_image_root = osp.join(root_path, 'imgs', split) - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) process_img_with_path = partial( process_img, @@ -195,7 +196,7 @@ def convert_hiertext( tasks = [] for img_idx, img_info in enumerate(annotation): tasks.append((img_idx, img_info)) - labels_list = mmcv.track_parallel_progress( + labels_list = mmengine.track_parallel_progress( 
process_img_with_path, tasks, keep_order=True, nproc=nproc) final_labels = [] diff --git a/tools/dataset_converters/textrecog/ilst_converter.py b/tools/dataset_converters/textrecog/ilst_converter.py index 6a253f3ae..237a82453 100644 --- a/tools/dataset_converters/textrecog/ilst_converter.py +++ b/tools/dataset_converters/textrecog/ilst_converter.py @@ -6,6 +6,7 @@ import xml.etree.ElementTree as ET import mmcv +import mmengine from mmocr.utils.fileio import list_to_file from mmocr.utils.img_utils import crop_img @@ -54,10 +55,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -258,7 +259,7 @@ def parse_args(): def main(): args = parse_args() root_path = args.root_path - with mmcv.Timer(print_tmpl='It takes {}s to convert ILST annotation'): + with mmengine.Timer(print_tmpl='It takes {}s to convert ILST annotation'): files = collect_files( osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations')) image_infos = collect_annotations(files, nproc=args.nproc) diff --git a/tools/dataset_converters/textrecog/imgur_converter.py b/tools/dataset_converters/textrecog/imgur_converter.py index fbccc391f..43ef56efb 100644 --- a/tools/dataset_converters/textrecog/imgur_converter.py +++ b/tools/dataset_converters/textrecog/imgur_converter.py @@ -177,7 +177,8 @@ def main(): for split in ['train', 'val', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert IMGUR annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert IMGUR annotation'): anno_infos = collect_imgur_info( root_path, f'imgur5k_annotations_{split}.json') generate_ann(root_path, split, anno_infos, args.format) diff --git a/tools/dataset_converters/textrecog/kaist_converter.py b/tools/dataset_converters/textrecog/kaist_converter.py index fb4882d48..afbe54847 100644 --- a/tools/dataset_converters/textrecog/kaist_converter.py +++ b/tools/dataset_converters/textrecog/kaist_converter.py @@ -7,6 +7,7 @@ import xml.etree.ElementTree as ET import mmcv +import mmengine from mmocr.utils.fileio import list_to_file from mmocr.utils.img_utils import crop_img @@ -68,10 +69,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -178,8 +179,8 @@ def generate_ann(root_path, split, image_infos, preserve_vertical, format): dst_label_file = osp.join(root_path, f'train_label.{format}') elif split == 'val': dst_label_file = osp.join(root_path, f'val_label.{format}') - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) lines = [] for image_info in image_infos: @@ -255,7 +256,7 @@ def main(): # Train set trn_infos = collect_annotations(trn_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert KAIST Training annotation'): generate_ann(root_path, 'training', trn_infos, args.preserve_vertical, args.format) @@ -263,7 +264,7 @@ def main(): # Val set if len(val_files) > 0: 
val_infos = collect_annotations(val_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert KAIST Val annotation'): generate_ann(root_path, 'val', val_infos, args.preserve_vertical, args.format) diff --git a/tools/dataset_converters/textrecog/lsvt_converter.py b/tools/dataset_converters/textrecog/lsvt_converter.py index f3243cd1b..258ebd2cc 100644 --- a/tools/dataset_converters/textrecog/lsvt_converter.py +++ b/tools/dataset_converters/textrecog/lsvt_converter.py @@ -121,8 +121,8 @@ def convert_lsvt(root_path, dst_image_root = osp.join(root_path, 'crops', split) ignore_image_root = osp.join(root_path, 'ignores', split) src_image_root = osp.join(root_path, 'imgs') - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) process_img_with_path = partial( process_img, @@ -163,7 +163,7 @@ def convert_lsvt(root_path, tasks.append((img_idx + img_start_idx, img_info, annotation[prefix])) idx = idx + 1 - labels_list = mmcv.track_parallel_progress( + labels_list = mmengine.track_parallel_progress( process_img_with_path, tasks, keep_order=True, nproc=nproc) final_labels = [] for label_list in labels_list: diff --git a/tools/dataset_converters/textrecog/mtwi_converter.py b/tools/dataset_converters/textrecog/mtwi_converter.py index 32a33e2f3..fba62ce75 100644 --- a/tools/dataset_converters/textrecog/mtwi_converter.py +++ b/tools/dataset_converters/textrecog/mtwi_converter.py @@ -7,6 +7,7 @@ import cv2 import mmcv +import mmengine from PIL import Image from mmocr.utils.fileio import list_to_file @@ -81,10 +82,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -172,8 +173,8 @@ def generate_ann(root_path, split, image_infos, preserve_vertical, format): dst_label_file = osp.join(root_path, f'train_label.{format}') elif split == 'val': dst_label_file = osp.join(root_path, f'val_label.{format}') - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) lines = [] for image_info in image_infos: @@ -249,7 +250,7 @@ def main(): # Train set trn_infos = collect_annotations(trn_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert MTWI Training annotation'): generate_ann(root_path, 'training', trn_infos, args.preserve_vertical, args.format) @@ -257,7 +258,7 @@ def main(): # Val set if len(val_files) > 0: val_infos = collect_annotations(val_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert MTWI Val annotation'): generate_ann(root_path, 'val', val_infos, args.preserve_vertical, args.format) diff --git a/tools/dataset_converters/textrecog/naf_converter.py b/tools/dataset_converters/textrecog/naf_converter.py index 867f58591..577f1f545 100644 --- a/tools/dataset_converters/textrecog/naf_converter.py +++ b/tools/dataset_converters/textrecog/naf_converter.py @@ -64,10 +64,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, 
nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -194,8 +194,8 @@ def generate_ann(root_path, split, image_infos, preserve_vertical, format): dst_label_file = osp.join(root_path, f'test_label.{format}') else: raise NotImplementedError - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) lines = [] for image_info in image_infos: @@ -274,7 +274,8 @@ def main(): split_info['val'] = split_info.pop('valid') for split in ['training', 'val', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert NAF annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert NAF annotation'): files = collect_files( osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations'), split_info[split]) diff --git a/tools/dataset_converters/textrecog/openvino_converter.py b/tools/dataset_converters/textrecog/openvino_converter.py index 0ac389100..3d98c428f 100644 --- a/tools/dataset_converters/textrecog/openvino_converter.py +++ b/tools/dataset_converters/textrecog/openvino_converter.py @@ -5,7 +5,6 @@ from argparse import ArgumentParser from functools import partial -import mmcv import mmengine from PIL import Image @@ -80,7 +79,7 @@ def convert_openimages(root_path, anns.setdefault(ann['image_id'], []).append(ann) for img_idx, img_info in enumerate(annotation['images']): tasks.append((img_idx + img_start_idx, img_info, anns[img_info['id']])) - labels_list = mmcv.track_parallel_progress( + labels_list = mmengine.track_parallel_progress( process_img_with_path, tasks, keep_order=True, nproc=nproc) final_labels = [] for label_list in labels_list: diff --git a/tools/dataset_converters/textrecog/rctw_converter.py b/tools/dataset_converters/textrecog/rctw_converter.py index 7d6bb4585..2b0a07376 100644 --- a/tools/dataset_converters/textrecog/rctw_converter.py +++ b/tools/dataset_converters/textrecog/rctw_converter.py @@ -6,6 +6,7 @@ import os.path as osp import mmcv +import mmengine from mmocr.utils.fileio import list_to_file from mmocr.utils.img_utils import crop_img @@ -65,10 +66,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -159,8 +160,8 @@ def generate_ann(root_path, split, image_infos, preserve_vertical, format): dst_label_file = osp.join(root_path, f'train_label.{format}') elif split == 'val': dst_label_file = osp.join(root_path, f'val_label.{format}') - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) lines = [] for image_info in image_infos: @@ -233,7 +234,7 @@ def main(): osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations'), ratio) # Train set - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert RCTW Training annotation'): trn_infos = collect_annotations(trn_files, nproc=args.nproc) generate_ann(root_path, 'training', trn_infos, args.preserve_vertical, @@ -241,7 +242,7 @@ def main(): # Val set if len(val_files) > 0: - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert RCTW Val 
annotation'): val_infos = collect_annotations(val_files, nproc=args.nproc) generate_ann(root_path, 'val', val_infos, args.preserve_vertical, diff --git a/tools/dataset_converters/textrecog/rects_converter.py b/tools/dataset_converters/textrecog/rects_converter.py index 6a1d1e78b..206cd926e 100644 --- a/tools/dataset_converters/textrecog/rects_converter.py +++ b/tools/dataset_converters/textrecog/rects_converter.py @@ -66,10 +66,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -178,8 +178,8 @@ def generate_ann(root_path, split, image_infos, preserve_vertical, format): dst_label_file = osp.join(root_path, f'train_label.{format}') elif split == 'val': dst_label_file = osp.join(root_path, f'val_label.{format}') - mmcv.mkdir_or_exist(dst_image_root) - mmcv.mkdir_or_exist(ignore_image_root) + mmengine.mkdir_or_exist(dst_image_root) + mmengine.mkdir_or_exist(ignore_image_root) lines = [] for image_info in image_infos: @@ -255,7 +255,7 @@ def main(): # Train set trn_infos = collect_annotations(trn_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert ReCTS Training annotation'): generate_ann(root_path, 'training', trn_infos, args.preserve_vertical, args.format) @@ -263,7 +263,7 @@ def main(): # Val set if len(val_files) > 0: val_infos = collect_annotations(val_files, nproc=args.nproc) - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert ReCTS Val annotation'): generate_ann(root_path, 'val', val_infos, args.preserve_vertical, args.format) diff --git a/tools/dataset_converters/textrecog/sroie_converter.py b/tools/dataset_converters/textrecog/sroie_converter.py index 687f96b8e..c999610fc 100644 --- a/tools/dataset_converters/textrecog/sroie_converter.py +++ b/tools/dataset_converters/textrecog/sroie_converter.py @@ -5,6 +5,7 @@ import os.path as osp import mmcv +import mmengine import numpy as np from mmocr.utils.fileio import list_to_file @@ -58,10 +59,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -207,7 +208,8 @@ def main(): for split in ['training', 'test']: print(f'Processing {split} set...') - with mmcv.Timer(print_tmpl='It takes {}s to convert SROIE annotation'): + with mmengine.Timer( + print_tmpl='It takes {}s to convert SROIE annotation'): files = collect_files( osp.join(root_path, 'imgs', split), osp.join(root_path, 'annotations', split)) diff --git a/tools/dataset_converters/textrecog/synthtext_converter.py b/tools/dataset_converters/textrecog/synthtext_converter.py index aa004926a..6b33a0c24 100644 --- a/tools/dataset_converters/textrecog/synthtext_converter.py +++ b/tools/dataset_converters/textrecog/synthtext_converter.py @@ -4,6 +4,7 @@ from functools import partial import mmcv +import mmengine import numpy as np from scipy.io import loadmat @@ -80,7 +81,7 @@ def load_gt_data(filename, n_proc): txt = mat_data['txt'] wordBB = mat_data['wordBB'] charBB = mat_data['charBB'] - return mmcv.track_parallel_progress( + return 
mmengine.track_parallel_progress( load_gt_datum, list(zip(imnames, txt, wordBB, charBB)), nproc=n_proc) @@ -136,7 +137,8 @@ def main(): process_with_outdir = partial( process, img_path_prefix=args.img_path, out_dir=args.out_dir) print('Creating cropped images and gold labels...') - mmcv.track_parallel_progress(process_with_outdir, data, nproc=args.n_proc) + mmengine.track_parallel_progress( + process_with_outdir, data, nproc=args.n_proc) print('Done') diff --git a/tools/dataset_converters/textrecog/textocr_converter.py b/tools/dataset_converters/textrecog/textocr_converter.py index 19fe907cf..f47ff52d3 100644 --- a/tools/dataset_converters/textrecog/textocr_converter.py +++ b/tools/dataset_converters/textrecog/textocr_converter.py @@ -75,7 +75,7 @@ def convert_textocr(root_path, ann_ids = annotation['imgToAnns'][img_info['id']] anns = [annotation['anns'][ann_id] for ann_id in ann_ids] tasks.append((img_idx + img_start_idx, img_info, anns)) - labels_list = mmcv.track_parallel_progress( + labels_list = mmengine.track_parallel_progress( process_img_with_path, tasks, keep_order=True, nproc=nproc) final_labels = [] for label_list in labels_list: diff --git a/tools/dataset_converters/textrecog/totaltext_converter.py b/tools/dataset_converters/textrecog/totaltext_converter.py index f027424c4..6d9817274 100644 --- a/tools/dataset_converters/textrecog/totaltext_converter.py +++ b/tools/dataset_converters/textrecog/totaltext_converter.py @@ -6,6 +6,7 @@ import re import mmcv +import mmengine import numpy as np import scipy.io as scio import yaml @@ -65,10 +66,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -371,7 +372,7 @@ def main(): for split, ann_name in set_name.items(): print(f'Converting {split} into {ann_name}') - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert totaltext annotation'): files = collect_files( osp.join(img_dir, split), osp.join(gt_dir, split)) diff --git a/tools/dataset_converters/textrecog/vintext_converter.py b/tools/dataset_converters/textrecog/vintext_converter.py index ed0f901b9..a2355b9a0 100644 --- a/tools/dataset_converters/textrecog/vintext_converter.py +++ b/tools/dataset_converters/textrecog/vintext_converter.py @@ -5,6 +5,7 @@ import os.path as osp import mmcv +import mmengine from mmocr.utils.fileio import list_to_file from mmocr.utils.img_utils import crop_img @@ -52,10 +53,10 @@ def collect_annotations(files, nproc=1): assert isinstance(nproc, int) if nproc > 1: - images = mmcv.track_parallel_progress( + images = mmengine.track_parallel_progress( load_img_info, files, nproc=nproc) else: - images = mmcv.track_progress(load_img_info, files) + images = mmengine.track_progress(load_img_info, files) return images @@ -217,7 +218,7 @@ def main(): root_path = args.root_path for split in ['training', 'test', 'unseen_test']: print(f'Processing {split} set...') - with mmcv.Timer( + with mmengine.Timer( print_tmpl='It takes {}s to convert VinText annotation'): files = collect_files( osp.join(root_path, 'imgs', split),