readme, decensor

deeppomf 2018-02-11 13:57:44 -05:00
parent dd41595ffa
commit a9d7000736
5 changed files with 138 additions and 2 deletions


@@ -3,6 +3,8 @@
This project applies an implementation of [Globally and Locally Consistent Image Completion](http://hi.cs.waseda.ac.jp/%7Eiizuka/projects/completion/data/completion_sig2017.pdf) to the problem of hentai decensorship. Using a deep, fully convolutional neural network, DeepMindBreak can replace censored regions in hentai artwork with plausible reconstructions. The user only needs to specify the censored regions.
+![Censored, decensored](/readme_images/collage.png)
# Limitations
This project is LIMITED in capability. It is a proof of concept of ongoing research.
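
For context on "the user only needs to specify the censored regions": the decensor.py changes below read images from decensor_input_images/ and define mask_color = [0, 255, 0], which suggests censored areas are marked by painting them pure green. A minimal sketch of turning such a marked image into a 0/1 mask follows; the helper name and exact steps are illustrative, not the project's code.

import numpy as np
from PIL import Image

mask_color = [0, 255, 0]  # pure green, matching decensor.py

def green_mask(path):
    # Hypothetical helper: returns 1.0 where a pixel is pure green, else 0.0.
    img = np.array(Image.open(path).convert('RGB'))
    return np.all(img == mask_color, axis=-1).astype(np.float32)

# e.g. mask = green_mask('decensor_input_images/sample.png')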


@@ -7,12 +7,13 @@ import matplotlib.pyplot as plt
import sys
sys.path.append('..')
from model import Model
+from poisson_blend import blend
IMAGE_SIZE = 128
LOCAL_SIZE = 64
HOLE_MIN = 24
HOLE_MAX = 48
-BATCH_SIZE = 3
+BATCH_SIZE = 1
image_folder = 'decensor_input_images/'
mask_color = [0, 255, 0]
@@ -57,7 +58,10 @@ def decensor():
cnt += 1
img = completion[i]
img = np.array((img + 1) * 127.5, dtype=np.uint8)
-output = Image.fromarray(img.astype('uint8'), 'RGB')
+original = x_batch[i]
+original = np.array((original + 1) * 127.5, dtype=np.uint8)
+output = blend(original, img, mask_batch[0,:,:,0])
+output = Image.fromarray(output.astype('uint8'), 'RGB')
dst = './decensor_output_images/{}.png'.format("{0:06d}".format(cnt))
output.save(dst)
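
The added lines assume completion and x_batch hold float images in [-1, 1], so (img + 1) * 127.5 rescales them to 8-bit [0, 255], and mask_batch[0,:,:,0] hands blend a single 2-D mask plane. A self-contained sketch of that per-image path, with made-up arrays standing in for the tensors that normally come out of the TensorFlow session:

import numpy as np
from PIL import Image
from poisson_blend import blend

# Stand-ins for one batch element (the real values come from sess.run).
completion = np.random.uniform(-1, 1, (128, 128, 3)).astype(np.float32)
original = np.random.uniform(-1, 1, (128, 128, 3)).astype(np.float32)
mask_plane = np.zeros((128, 128), dtype=np.float32)
mask_plane[32:96, 32:96] = 1.0  # 2-D slice, like mask_batch[0,:,:,0]

# Map [-1, 1] floats to 8-bit pixels: -1 -> 0, +1 -> 255.
img = np.array((completion + 1) * 127.5, dtype=np.uint8)
orig = np.array((original + 1) * 127.5, dtype=np.uint8)

# Poisson-blend the completed pixels into the original and save the result.
out = blend(orig, img, mask_plane)
Image.fromarray(out.astype('uint8'), 'RGB').save('blend_example.png')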

poisson_blend.py Normal file (108 additions)

@@ -0,0 +1,108 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
import scipy.sparse
import PIL.Image
import pyamg
import copy

# pre-process the mask array so that uint64 types from opencv.imread can be adapted
def prepare_mask(mask):
    result = np.ndarray((mask.shape[0], mask.shape[1]), dtype=np.uint8)
    for i in range(mask.shape[0]):
        for j in range(mask.shape[1]):
            if mask[i][j] > 0:
                result[i][j] = 1
            else:
                result[i][j] = 0
    mask = result
    return mask

def blend(img_target, img_source, img_mask, offset=(0, 0)):
    # compute regions to be blended
    region_source = (
        max(-offset[0], 0),
        max(-offset[1], 0),
        min(img_target.shape[0]-offset[0], img_source.shape[0]),
        min(img_target.shape[1]-offset[1], img_source.shape[1]))
    region_target = (
        max(offset[0], 0),
        max(offset[1], 0),
        min(img_target.shape[0], img_source.shape[0]+offset[0]),
        min(img_target.shape[1], img_source.shape[1]+offset[1]))
    region_size = (region_source[2]-region_source[0], region_source[3]-region_source[1])

    # clip and normalize mask image
    img_mask = img_mask[region_source[0]:region_source[2], region_source[1]:region_source[3]]
    #img_mask_copy = copy.deepcopy(img_mask)
    # prepare_mask doesn't change anything
    # img_mask = prepare_mask(img_mask)
    # if np.array_equal(img_mask, img_mask_copy):
    #     print "eq"
    img_mask[img_mask==0] = False
    img_mask[img_mask!=False] = True

    # create coefficient matrix
    A = scipy.sparse.identity(np.prod(region_size), format='lil')
    for y in range(region_size[0]):
        for x in range(region_size[1]):
            if img_mask[y,x]:
                index = x+y*region_size[1]
                A[index, index] = 4
                if index+1 < np.prod(region_size):
                    A[index, index+1] = -1
                if index-1 >= 0:
                    A[index, index-1] = -1
                if index+region_size[1] < np.prod(region_size):
                    A[index, index+region_size[1]] = -1
                if index-region_size[1] >= 0:
                    A[index, index-region_size[1]] = -1
    A = A.tocsr()

    # create poisson matrix for b
    P = pyamg.gallery.poisson(img_mask.shape)

    # for each layer (ex. RGB)
    for num_layer in range(img_target.shape[2]):
        # get subimages
        t = img_target[region_target[0]:region_target[2],region_target[1]:region_target[3],num_layer]
        s = img_source[region_source[0]:region_source[2], region_source[1]:region_source[3],num_layer]
        t = t.flatten()
        s = s.flatten()

        # create b
        b = P * s
        for y in range(region_size[0]):
            for x in range(region_size[1]):
                if not img_mask[y,x]:
                    index = x+y*region_size[1]
                    b[index] = t[index]

        # solve Ax = b
        x = pyamg.solve(A,b,verb=False,tol=1e-10)

        # assign x to target image
        x = np.reshape(x, region_size)
        x[x>255] = 255
        x[x<0] = 0
        x = np.array(x, img_target.dtype)
        img_target[region_target[0]:region_target[2],region_target[1]:region_target[3],num_layer] = x

    return img_target

def test():
    img_mask = np.asarray(PIL.Image.open('./testimages/test1_mask.png'))
    img_mask.flags.writeable = True
    img_source = np.asarray(PIL.Image.open('./testimages/test1_src.png'))
    img_source.flags.writeable = True
    img_target = np.asarray(PIL.Image.open('./testimages/test1_target.png'))
    img_target.flags.writeable = True
    img_ret = blend(img_target, img_source, img_mask, offset=(40,-30))
    img_ret = PIL.Image.fromarray(np.uint8(img_ret))
    img_ret.save('./testimages/test1_ret.png')

if __name__ == '__main__':
    test()
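
For a reader skimming the new file: for pixels inside the mask, the row of A is the 4/-1 Laplacian stencil and the matching entry of b is the source's discrete Laplacian (P * s); for pixels outside the mask, the identity row together with b[index] = t[index] pins the value to the target. Solving Ax = b therefore reproduces the source's gradients inside the masked region while matching the surrounding target at the boundary. Below is a small illustration of that row structure on an assumed 3x3 region with only the center pixel masked; it is not part of the commit.

import numpy as np
import scipy.sparse

region_size = (3, 3)
mask = np.zeros(region_size, dtype=bool)
mask[1, 1] = True  # only the center pixel gets blended

# Mirrors the construction in blend() (bounds checks omitted for this interior pixel).
A = scipy.sparse.identity(np.prod(region_size), format='lil')
for y in range(region_size[0]):
    for x in range(region_size[1]):
        if mask[y, x]:
            index = x + y * region_size[1]
            A[index, index] = 4
            A[index, index + 1] = -1
            A[index, index - 1] = -1
            A[index, index + region_size[1]] = -1
            A[index, index - region_size[1]] = -1

print(A.toarray()[4])  # center-pixel row: [ 0. -1.  0. -1.  4. -1.  0. -1.  0.]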

readme_images/collage.png Normal file (binary, 771 KiB, not shown)


@@ -0,0 +1,22 @@
from PIL import Image
import matplotlib.pyplot as plt

def format_results(images, dst):
    fig = plt.figure()
    for i, image in enumerate(images):
        text, img = image
        fig.add_subplot(1, 3, i + 1)
        plt.imshow(img)
        plt.tick_params(labelbottom='off')
        plt.tick_params(labelleft='off')
        plt.gca().get_xaxis().set_ticks_position('none')
        plt.gca().get_yaxis().set_ticks_position('none')
        plt.xlabel(text)
    plt.savefig(dst)
    plt.close()

if __name__ == "__main__":
    masked = Image.open("censored.png")
    img = Image.open("decensored.png")
    raw = Image.open("original.png")
    format_results([['Input', masked], ['Output', img], ['Ground Truth', raw]], "result.png")