import os
import stat
import sys

import numpy as np
import tensorflow as tf
import tqdm
from PIL import Image
import matplotlib.pyplot as plt

sys.path.append('..')

from model import Model
from poisson_blend import blend
from config import *
import shape_detect as sd

# TODO: allow variable batch sizes when decensoring. Changing BATCH_SIZE will likely result in crashing.
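# Background for the TODO (inferred from the code below, not a documented guarantee):
# decensor_boxes builds its placeholders with a fixed leading dimension of BATCH_SIZE
# and only runs len(x_decensor) // BATCH_SIZE full batches, so boxes left over from a
# partial batch would be silently skipped; a batch size other than 1 is untested here.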
BATCH_SIZE = 1

# RGB color that marks censored regions; pixels equal to this color become the
# inpainting mask (see get_mask below).
mask_color = [args.mask_color_red, args.mask_color_green, args.mask_color_blue]
poisson_blending_enabled = False

def is_file(file):
    """Return True if file exists and is not a directory."""
    try:
        return not stat.S_ISDIR(os.stat(file).st_mode)
    except OSError:
        return False


def get_files(dir):
    """List the plain files (non-directories) directly inside dir."""
    all_files = os.listdir(dir)
    filtered_files = list(filter(lambda file: is_file(os.path.join(dir, file)), all_files))
    return filtered_files


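# The shape_detect module (imported as sd) is assumed to provide the helpers used below:
# process_image_path(path, color) returning the loaded OpenCV image together with a list
# of (box_image, cx, cy) crops around regions of that color, cv_to_pillow / pillow_to_cv
# for converting between OpenCV and Pillow images, insert_box((box_image, cx, cy), image)
# for pasting a crop back into the full image, and write_to_file(image, path) for saving.
# Only their usage in this file is described here; see shape_detect.py for the actual code.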
def find_censor_boxes(image_path):
    """Detect mask_color regions in the image and return (cv_image, boxes), with each box as a Pillow image."""
    (image, boxes) = sd.process_image_path(image_path, tuple(mask_color))

    # Convert each detected box from an OpenCV image to a Pillow image, keeping its (cx, cy) coordinates.
    for i, (box_image, cx, cy) in enumerate(boxes):
        boxes[i] = (sd.cv_to_pillow(box_image), cx, cy)

    return (image, boxes)


def decensor(args):
    """Decensor every .png in args.decensor_input_path and write the results to args.decensor_output_path."""
    subdir = args.decensor_input_path
    files = sorted(get_files(subdir))

    for file in files:
        file_path = os.path.join(subdir, file)
        if os.path.isfile(file_path) and os.path.splitext(file_path)[1] == ".png":
            print(file_path)
            (image, boxes) = find_censor_boxes(file_path)
            decensored_boxes = decensor_boxes(args, boxes)
            # Paste each decensored box back into the original image at its (cx, cy) position.
            for (box_pillow_image, cx, cy) in decensored_boxes:
                box_image = sd.pillow_to_cv(box_pillow_image)
                image = sd.insert_box((box_image, cx, cy), image)

            sd.write_to_file(image, os.path.join(args.decensor_output_path, file))


def decensor_boxes(args, boxes):
    """Run the completion model on each censored box and return a list of (pillow_image, cx, cy) results."""
    # Build the graph with a fixed batch size; the checkpoint is reloaded on every call.
    x = tf.placeholder(tf.float32, [BATCH_SIZE, args.input_size, args.input_size, args.input_channel_size])
    mask = tf.placeholder(tf.float32, [BATCH_SIZE, args.input_size, args.input_size, 1])
    local_x = tf.placeholder(tf.float32, [BATCH_SIZE, args.local_input_size, args.local_input_size, args.input_channel_size])
    global_completion = tf.placeholder(tf.float32, [BATCH_SIZE, args.input_size, args.input_size, args.input_channel_size])
    local_completion = tf.placeholder(tf.float32, [BATCH_SIZE, args.local_input_size, args.local_input_size, args.input_channel_size])
    is_training = tf.placeholder(tf.bool, [])

    model = Model(x, mask, local_x, global_completion, local_completion, is_training, batch_size=BATCH_SIZE)
    sess = tf.Session()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    saver = tf.train.Saver()
    saver.restore(sess, './models/latest')

    # Normalize each box image from [0, 255] to [-1, 1] for the model.
    x_decensor = []
    for (box_image, cx, cy) in boxes:
        image = np.array(box_image) / 127.5 - 1
        x_decensor.append(image)

    x_decensor = np.array(x_decensor)
    print(x_decensor.shape)
    step_num = len(x_decensor) // BATCH_SIZE

    results = []

    cnt = 0
    for i in tqdm.tqdm(range(step_num)):
        x_batch = x_decensor[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
        mask_batch = get_mask(x_batch)
        completion = sess.run(model.completion, feed_dict={x: x_batch, mask: mask_batch, is_training: False})
        for j in range(BATCH_SIZE):
            # Denormalize the completed image (and the original, for blending) back to [0, 255].
            img = completion[j]
            img = np.array((img + 1) * 127.5, dtype=np.uint8)
            original = x_batch[j]
            original = np.array((original + 1) * 127.5, dtype=np.uint8)
            if poisson_blending_enabled:
                img = blend(original, img, mask_batch[j, :, :, 0])
            output = Image.fromarray(img.astype('uint8'), 'RGB')
            results.append((output, boxes[cnt][1], boxes[cnt][2]))
            cnt += 1

    sess.close()
    tf.reset_default_graph()

    return results


def get_mask(x_batch):
    """Build a binary mask for each image in the batch: 1 where the pixel equals mask_color, 0 elsewhere."""
    mask = []
    for i in range(BATCH_SIZE):
        # Convert the normalized image back to uint8 so it can be compared against mask_color.
        raw = x_batch[i]
        raw = np.array((raw + 1) * 127.5, dtype=np.uint8)
        m = np.zeros((args.input_size, args.input_size, 1), dtype=np.uint8)
        for x in range(args.input_size):
            for y in range(args.input_size):
                if np.array_equal(raw[x][y], mask_color):
                    m[x, y] = 1
        mask.append(m)
    return np.array(mask)


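# Untested sketch of a vectorized alternative to get_mask, assuming x_batch is a float
# array of shape (BATCH_SIZE, input_size, input_size, 3) in [-1, 1] as above. It is not
# called anywhere; get_mask remains the version used by decensor_boxes.
def get_mask_vectorized(x_batch):
    # Denormalize the whole batch at once, then compare every pixel to mask_color.
    raw = np.array((x_batch + 1) * 127.5, dtype=np.uint8)
    matches = np.all(raw == np.array(mask_color, dtype=np.uint8), axis=-1)
    # Add back the trailing channel axis so the result matches get_mask's output shape.
    return matches[..., np.newaxis].astype(np.uint8)

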
if __name__ == '__main__':
    if not os.path.exists(args.decensor_output_path):
        os.makedirs(args.decensor_output_path)
    decensor(args)
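# Usage note (inferred from the code above rather than from separate documentation):
# the script expects config to supply args with decensor_input_path, decensor_output_path,
# input_size, local_input_size, input_channel_size, and the mask_color_* components.
# It reads every .png in decensor_input_path whose censored regions are painted in
# mask_color, inpaints those regions with the model checkpoint at ./models/latest,
# and writes the results under the same file names to decensor_output_path.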