A Hands-On Guide to Colorizing Black-and-White Images [Mastering Huawei Cloud]
Many classic old photographs, limited by the technology of their time, have only survived in black and white. Black-and-white photos have a charm of their own, but color photos can often feel far more immersive. This post walks through an AI-based method on Huawei Cloud ModelArts for automatically colorizing black-and-white images. Follow the steps below and try it yourself!
Preparation
You will need the Huawei Cloud ModelArts platform. If you are not yet familiar with it, please refer to the official documentation: 成長地圖_AI開發平臺ModelArts_華為云 (huaweicloud.com). Those docs can take you all the way from AI beginner to AI expert.
Workflow
1. Environment Setup
Click the link below to open the JupyterLab page of the Instance-Aware Image Colorization notebook and enter the code editing interface:
https://authoring-modelarts-cnnorth4.huaweicloud.com/console/lab?share-url-b64=aHR0cHM6Ly9jbm5vcnRoNC1tb2RlbGh1Yi1tb2RlbHMub2JzLmNuLW5vcnRoLTQubXlod2Nsb3Vkcy5jb20vaXRlbS83OTJjNzlhNS1jYzI5LTRhNjUtYmUzNy01MTRhYjk1OWE2OTIvMS4wLjAvSW5zdENvbG9yaXphdGlvbi5pcHluYg%3D%3D&galleryitemid=792c79a5-cc29-4a65-be37-514ab959a692
Do not use the default configuration. Select the GPU: 1*V100 | CPU: 8 cores, 64GB flavor instead; with the default flavor, the Detectron2 configuration step will fail with an error.
The flavor-switching procedure is shown in the figure below.
Once you have switched, click OK.
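Before continuing, it can be worth confirming that the notebook really is running on the V100 flavor. This is an optional sanity check, not part of the original notebook; it assumes the standard NVIDIA and Linux tooling available on ModelArts GPU images:

# Optional sanity check (assumes nvidia-smi is available on the GPU flavor)
!nvidia-smi
# CPU core count and memory, to confirm the 8-core / 64GB flavor
!nproc
!free -h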
2. Experiment Steps
2.1 Download the code and data
import os
!wget https://obs-aigallery-zc.obs.cn-north-4.myhuaweicloud.com/clf/code/InstColorization.zip
os.system('unzip InstColorization.zip')
Select the cell and press Ctrl + Enter to run it, or click the Run button on the toolbar.
When the cell finishes, the [*] marker changes to a number such as [1], and the kernel status indicator at the bottom left of the toolbar changes from Busy to Idle.
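As an extra check (not part of the original notebook), you can confirm that the download and unzip actually produced the InstColorization folder before moving on:

import os

# The wget/unzip cell above should have created an InstColorization/ directory
assert os.path.isdir('InstColorization'), 'download or unzip failed'
print(os.listdir('InstColorization')[:10])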
2.2 Install dependencies
!gcc --version
!pip install torch==1.5 torchvision==0.6
!pip install cython pyyaml==5.1
!pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
!pip install dominate==2.4.0
!pip install detectron2==0.1.3 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/index.html
!pip install opencv-python
!pip install scikit-image

import torch, torchvision
print(torch.__version__, torch.cuda.is_available())
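If you want to confirm that the pinned Detectron2 build was picked up correctly, an optional check (not in the original notebook) alongside the PyTorch check above:

import detectron2
# Expect 0.1.3, matching the wheel installed above
print(detectron2.__version__)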
2.3 Prepare for colorization
2.3.1 Change directory
cd InstColorization/
2.3.2 Configure Detectron2
from os.path import join, isfile, isdir
from os import listdir
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

from argparse import ArgumentParser

import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

import numpy as np
import cv2

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
import torch

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
input_dir = "example"
image_list = [f for f in listdir(input_dir) if isfile(join(input_dir, f))]
output_npz_dir = "{0}_bbox".format(input_dir)
if os.path.isdir(output_npz_dir) is False:
    print('Create path: {0}'.format(output_npz_dir))
    os.makedirs(output_npz_dir)
for image_path in image_list:
    img = cv2.imread(join(input_dir, image_path))
    lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l_channel, a_channel, b_channel = cv2.split(lab_image)
    # stack the L channel three times so the RGB-trained detector can run on the grayscale content
    l_stack = np.stack([l_channel, l_channel, l_channel], axis=2)
    outputs = predictor(l_stack)
    save_path = join(output_npz_dir, image_path.split('.')[0])
    pred_bbox = outputs["instances"].pred_boxes.to(torch.device('cpu')).tensor.numpy()
    pred_scores = outputs["instances"].scores.cpu().data.numpy()
    np.savez(save_path, bbox=pred_bbox, scores=pred_scores)
!ls example_bbox
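Each .npz file in example_bbox holds the bounding boxes and confidence scores that Detectron2 predicted for one image. To see what was actually saved, here is a small inspection sketch (an optional addition, assuming at least one .npz file exists in the folder):

import numpy as np
from os import listdir
from os.path import join

npz_files = [f for f in listdir('example_bbox') if f.endswith('.npz')]
sample = np.load(join('example_bbox', npz_files[0]))
print(npz_files[0])
print('bbox shape:', sample['bbox'].shape)      # (N, 4): one box per detected instance
print('scores shape:', sample['scores'].shape)  # (N,): detection confidence per box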
2.4 Colorize the images
import sys
import time
from options.train_options import TestOptions
from models import create_model
import torch
from tqdm import tqdm_notebook
from fusion_dataset import Fusion_Testing_Dataset
from util import util
import multiprocessing

multiprocessing.set_start_method('spawn', True)
torch.backends.cudnn.benchmark = True

sys.argv = [sys.argv[0]]
opt = TestOptions().parse()
save_img_path = opt.results_img_dir
if os.path.isdir(save_img_path) is False:
    print('Create path: {0}'.format(save_img_path))
    os.makedirs(save_img_path)

opt.batch_size = 1
dataset = Fusion_Testing_Dataset(opt, -1)
dataset_loader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size)
dataset_size = len(dataset)
print('#Testing images = %d' % dataset_size)
model = create_model(opt)
model.setup_to_test('coco_finetuned_mask_256_ffs')
count_empty = 0
for data_raw in tqdm_notebook(dataset_loader):
    data_raw['full_img'][0] = data_raw['full_img'][0].cuda()
    if data_raw['empty_box'][0] == 0:
        data_raw['cropped_img'][0] = data_raw['cropped_img'][0].cuda()
        box_info = data_raw['box_info'][0]
        box_info_2x = data_raw['box_info_2x'][0]
        box_info_4x = data_raw['box_info_4x'][0]
        box_info_8x = data_raw['box_info_8x'][0]
        cropped_data = util.get_colorization_data(data_raw['cropped_img'], opt, ab_thresh=0, p=opt.sample_p)
        full_img_data = util.get_colorization_data(data_raw['full_img'], opt, ab_thresh=0, p=opt.sample_p)
        model.set_input(cropped_data)
        model.set_fusion_input(full_img_data, [box_info, box_info_2x, box_info_4x, box_info_8x])
        model.forward()
    else:
        count_empty += 1
        full_img_data = util.get_colorization_data(data_raw['full_img'], opt, ab_thresh=0, p=opt.sample_p)
        model.set_forward_without_box(full_img_data)
    model.save_current_imgs(join(save_img_path, data_raw['file_id'][0] + '.png'))

print('{0} images without bounding boxes'.format(count_empty))
2.5 Display the colorization results
def imshow(img):
    import IPython
    import cv2
    _, ret = cv2.imencode('.jpg', img)
    i = IPython.display.Image(data=ret)
    IPython.display.display(i)

img_name_list = ['000000022969', '000000023781', '000000046872', '000000050145']
# change the index (0-3) to display a different image
show_index = 1

img = cv2.imread('example/'+img_name_list[show_index]+'.jpg')
lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l_channel, _, _ = cv2.split(lab_image)

img = cv2.imread('results/'+img_name_list[show_index]+'.png')
lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
_, a_pred, b_pred = cv2.split(lab_image)
a_pred = cv2.resize(a_pred, (l_channel.shape[1], l_channel.shape[0]))
b_pred = cv2.resize(b_pred, (l_channel.shape[1], l_channel.shape[0]))

gray_color = np.ones_like(a_pred) * 128
gray_image = cv2.cvtColor(np.stack([l_channel, gray_color, gray_color], 2), cv2.COLOR_LAB2BGR)
color_image = cv2.cvtColor(np.stack([l_channel, a_pred, b_pred], 2), cv2.COLOR_LAB2BGR)

# save_img_path = 'results_origin/'
# if os.path.isdir(save_img_path) is False:
#     print('Create path: {0}'.format(save_img_path))
#     os.makedirs(save_img_path)
# cv2.imwrite('results_origin/'+img_name_list[show_index]+'.png', color_image)

imshow(np.concatenate([gray_image, color_image], 1))
That completes the official example!
What if you want to colorize your own black-and-white images? Keep reading!
Step 2.5 contains the following lines of code. Looking at them, you can see that the source images live in the example folder, and changing the index colorizes and displays a different image:
img_name_list = ['000000022969', '000000023781', '000000046872', '000000050145']
# change the index (0-3) to display a different image
show_index = 1

img = cv2.imread('example/'+img_name_list[show_index]+'.jpg')
lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l_channel, _, _ = cv2.split(lab_image)

img = cv2.imread('results/'+img_name_list[show_index]+'.png')
lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
_, a_pred, b_pred = cv2.split(lab_image)
Upload your local image to the example folder using the Upload Files button on the menu bar.
Then repeat steps 2.3, 2.4, and 2.5. In step 2.5, update the image information: add the uploaded image's name, 123, to the array and set the display index to 4.
def imshow(img):
    import IPython
    import cv2
    _, ret = cv2.imencode('.jpg', img)
    i = IPython.display.Image(data=ret)
    IPython.display.display(i)

img_name_list = ['000000022969', '000000023781', '000000046872', '000000050145', '123']
# change the index (0-4) to display a different image
show_index = 4

img = cv2.imread('example/'+img_name_list[show_index]+'.jpg')
lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l_channel, _, _ = cv2.split(lab_image)

img = cv2.imread('results/'+img_name_list[show_index]+'.png')
lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
_, a_pred, b_pred = cv2.split(lab_image)
a_pred = cv2.resize(a_pred, (l_channel.shape[1], l_channel.shape[0]))
b_pred = cv2.resize(b_pred, (l_channel.shape[1], l_channel.shape[0]))

gray_color = np.ones_like(a_pred) * 128
gray_image = cv2.cvtColor(np.stack([l_channel, gray_color, gray_color], 2), cv2.COLOR_LAB2BGR)
color_image = cv2.cvtColor(np.stack([l_channel, a_pred, b_pred], 2), cv2.COLOR_LAB2BGR)

# save_img_path = 'results_origin/'
# if os.path.isdir(save_img_path) is False:
#     print('Create path: {0}'.format(save_img_path))
#     os.makedirs(save_img_path)
# cv2.imwrite('results_origin/'+img_name_list[show_index]+'.png', color_image)

imshow(np.concatenate([gray_image, color_image], 1))
Now you can display your own colorized image!
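If you upload several images, hardcoding every file name becomes tedious. As an alternative sketch (not part of the original notebook, and assuming your uploads are .jpg files so the display cell can find them under example/), you can build img_name_list automatically from whatever is in the example folder:

from os import listdir
from os.path import splitext

# Collect the file-name stems of all .jpg images in example/ instead of typing them by hand
img_name_list = sorted(splitext(f)[0] for f in listdir('example') if f.lower().endswith('.jpg'))
print(img_name_list)

# Point show_index at the image you want to display, e.g. the last one in the list
show_index = len(img_name_list) - 1

After setting these two variables, re-run the rest of the display cell unchanged.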