Add files via upload
GZWQ authored Nov 14, 2018
1 parent 4d7ba3c commit 42730a3
Showing 5 changed files with 581 additions and 1 deletion.
43 changes: 42 additions & 1 deletion README.md
@@ -1 +1,42 @@
# adversarial_obfuscator
# adversarial-obfuscator

Unofficial implementation of the paper [Protecting Visual Secrets using Adversarial Nets](https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014908)

Here are my experimental results.

## Utility Evaluation

The utility classifier predicts the CIFAR-10 class (airplane vs. horse) of an image; "generated" images are the obfuscated outputs of the trained autoencoder. A minimal evaluation sketch follows the table below.

Scene 1: trained on **original** images, tested on **original** images

Scene 2: trained on **original** images, tested on **generated** images

Scene 3: trained on **generated** images, tested on **generated** images



| Scene | Accuracy |
| :---: | :------: |
| 1 | 94.61% |
| 2 | 87.785% |
| 3 | 90.643% |
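
For reference, here is a minimal sketch of how Scene 3 could be reproduced once `train_.py` has saved an autoencoder checkpoint. The classifier architecture, the checkpoint filename, the choice of the secret set as the source of generated images, and the training settings are illustrative assumptions, not the exact setup behind the table above.

```python
# Illustrative utility-evaluation sketch (Scene 3: train and test on generated images).
# The small CNN, the checkpoint path and the epochs are assumptions.
from keras.models import Sequential, load_model
from keras.layers import Conv2D, Flatten, Dense

import load_data

# Data is returned in [-1, 1]; labels are 0 = airplane, 1 = horse.
x_tr_pub, y_tr, x_te_pub, y_te, x_tr_sec, _, x_te_sec, _ = load_data.load_cifar10()

# Obfuscate with an autoencoder checkpoint saved by train_.py (filename is an assumption).
ae = load_model('./models_vaegan/1000_autoencoder.h5')
x_tr_gen = ae.predict(x_tr_sec, batch_size=64)
x_te_gen = ae.predict(x_te_sec, batch_size=64)

# A small stand-in CNN for the airplane-vs-horse utility task.
clf = Sequential([
    Conv2D(32, (3, 3), strides=2, padding='same', activation='relu', input_shape=(32, 32, 3)),
    Conv2D(64, (3, 3), strides=2, padding='same', activation='relu'),
    Flatten(),
    Dense(1, activation='sigmoid'),
])
clf.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
clf.fit(x_tr_gen, y_tr, batch_size=64, epochs=10, verbose=2)

_, acc = clf.evaluate(x_te_gen, y_te, verbose=0)
print('Scene 3 utility accuracy: %.2f%%' % (100 * acc))
```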



## Privacy Evaluation

The privacy attacker tries to detect whether an image contains the embedded QR-code secret; a minimal sketch follows the table below.

Scene 1: trained on **original** images, tested on **generated** images

Scene 2: trained on **generated** images, tested on **generated** images



| Scene | Accuracy |
| :---: | :------: |
| 1 | 50% |
| 2 | 83.537% |
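
A corresponding sketch for privacy Scene 1: the attacker is trained to separate original public images from original secret (QR-embedded) images, then tested on their obfuscated versions. Again, the attacker network, checkpoint filename, and training settings are assumptions, not the exact setup behind the table.

```python
# Illustrative privacy-evaluation sketch (Scene 1: attacker trained on original
# images, tested on generated ones). The attacker CNN is a stand-in.
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Conv2D, Flatten, Dense

import load_data

x_tr_pub, _, x_te_pub, _, x_tr_sec, _, x_te_sec, _ = load_data.load_cifar10()

# Attacker training set: original public (label 0) vs. original secret (label 1).
x_tr = np.concatenate([x_tr_pub, x_tr_sec], axis=0)
y_tr = np.concatenate([np.zeros((len(x_tr_pub), 1)), np.ones((len(x_tr_sec), 1))], axis=0)

attacker = Sequential([
    Conv2D(32, (3, 3), strides=2, padding='same', activation='relu', input_shape=(32, 32, 3)),
    Conv2D(64, (3, 3), strides=2, padding='same', activation='relu'),
    Flatten(),
    Dense(1, activation='sigmoid'),
])
attacker.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
attacker.fit(x_tr, y_tr, batch_size=64, epochs=10, verbose=2)

# Test set: obfuscated versions of the public/secret test images.
ae = load_model('./models_vaegan/1000_autoencoder.h5')  # checkpoint name is an assumption
x_te = np.concatenate([ae.predict(x_te_pub), ae.predict(x_te_sec)], axis=0)
y_te = np.concatenate([np.zeros((len(x_te_pub), 1)), np.ones((len(x_te_sec), 1))], axis=0)

_, acc = attacker.evaluate(x_te, y_te, verbose=0)
print('Scene 1 privacy (attacker) accuracy: %.2f%%' % (100 * acc))
```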



## Acknowledgments

Inspired by [Erik Linder-Norén](https://github.com/eriklindernoren/Keras-GAN/tree/master/dcgan) and [JWC](https://github.com/yushuinanrong/PPRL-VGAN)
126 changes: 126 additions & 0 deletions load_data.py
@@ -0,0 +1,126 @@
import numpy as np
import qrcode
from keras.datasets import cifar10
from PIL import Image

nb_classes = 2  # airplane vs. horse

def cifar():
    print("Loading cifar10 data ...")

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    # choose "airplane" (label 0) and "horse" (label 7) training pics
    airplane_idx = []
    horse_idx = []
    for index in range(len(y_train)):
        label = y_train[index]
        if label == 0:
            airplane_idx.append(index)
        elif label == 7:
            horse_idx.append(index)

    airplane_data = X_train[airplane_idx, :, :, :]
    horse_data = X_train[horse_idx, :, :, :]
    X_train = np.concatenate([airplane_data, horse_data], axis=0)
    airplane_data = np.zeros(shape=(5000, 1))
    horse_data = np.ones(shape=(5000, 1))
    y_train = np.concatenate([airplane_data, horse_data], axis=0)

    # choose "airplane" and "horse" test pics
    airplane_idx = []
    horse_idx = []
    for index in range(len(y_test)):
        label = y_test[index]
        if label == 0:
            airplane_idx.append(index)
        elif label == 7:
            horse_idx.append(index)

    airplane_data = X_test[airplane_idx, :, :, :]
    horse_data = X_test[horse_idx, :, :, :]
    X_test = np.concatenate([airplane_data, horse_data], axis=0)
    airplane_data = np.zeros(shape=(1000, 1))
    horse_data = np.ones(shape=(1000, 1))
    y_test = np.concatenate([airplane_data, horse_data], axis=0)

    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # binary labels as column vectors: 0 = airplane, 1 = horse
    Y_train = y_train.reshape(-1, 1)
    Y_test = y_test.reshape(-1, 1)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    print("Finished data loading!!!")
    return X_train, Y_train, X_test, Y_test

def load_cifar10():
    X_train_public, Y_train_public, X_test_public, Y_test_public = cifar()
    X_train_secret, X_test_secret = [], []
    X_train_public_temp, X_test_public_temp = [], []

    print("Generating secret training dataset ...")
    for index, pic in enumerate(X_train_public):
        # public version: just rescale to [-1, 1]
        pic_temp = (pic - 127.5) / 127.5
        X_train_public_temp.append(pic_temp)
        # secret version: paste a small QR code at a random position, then rescale
        qr = gene_qr(index)
        qr_pos = np.random.uniform(0, 21, size=2)
        pic = Image.fromarray(np.uint8(pic))
        pic.paste(qr, (int(qr_pos[0]), int(qr_pos[1])))
        pic = np.asarray(pic)
        pic = (pic.copy() - 127.5) / 127.5
        X_train_secret.append(pic)
        if index % 100 == 0:
            print(index, "/", len(X_train_public))

    print("Generating secret test dataset ...")
    for index, pic in enumerate(X_test_public):
        pic_temp = (pic - 127.5) / 127.5
        X_test_public_temp.append(pic_temp)
        qr = gene_qr(index)
        qr_pos = np.random.uniform(0, 21, size=2)
        pic = Image.fromarray(np.uint8(pic))
        pic.paste(qr, (int(qr_pos[0]), int(qr_pos[1])))
        pic = np.asarray(pic)
        pic = (pic.copy() - 127.5) / 127.5
        X_test_secret.append(pic)
        if index % 100 == 0:
            print(index, "/", len(X_test_public))

    X_train_public = np.array(X_train_public_temp)
    X_test_public = np.array(X_test_public_temp)
    X_train_secret = np.array(X_train_secret)
    X_test_secret = np.array(X_test_secret)

    # the secret set reuses the public labels (same airplane/horse classes)
    return X_train_public, Y_train_public, X_test_public, Y_test_public, \
        X_train_secret, Y_train_public, X_test_secret, Y_test_public


def gene_qr(index):
    # build a small QR code encoding the sample index as the "secret"
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=2,
    )
    qr.add_data(str(index))
    qr.make(fit=True)
    img = qr.make_image()
    img = img.resize((10, 10))  # shrink so it fits inside a 32x32 CIFAR image
    return img
132 changes: 132 additions & 0 deletions train_.py
@@ -0,0 +1,132 @@
from __future__ import print_function, division

import os
import random
import time

import numpy as np
from PIL import Image
from keras.layers import Input, Dense, Flatten
from keras.layers import Conv2D, Deconv2D
from keras.models import Model
from keras.optimizers import Adam

import load_data

class ae_gan():
    def __init__(self):
        self.img_rows, self.img_cols, self.channels = 32, 32, 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        optimizer = Adam(lr=0.0002, beta_1=0.5)

        # the "attack" network tries to tell secret images (label 1) from public ones (label 0)
        self.attack = self.discriminator()
        self.attack.compile(loss=['binary_crossentropy'],
                            optimizer=optimizer,
                            metrics=['accuracy'])

        # the obfuscator is an autoencoder trained to reconstruct its input
        # while fooling the attack network
        self.ae = self.autoencoder()
        input_image = Input(shape=self.img_shape)
        generated_img = self.ae(input_image)

        # freeze the attack network while training the autoencoder
        self.attack.trainable = False
        valid = self.attack(generated_img)

        self.combined_model = Model(input_image, [valid, generated_img])
        self.combined_model.compile(loss=['binary_crossentropy', 'mae'],
                                    loss_weights=[0.5, 0.5],
                                    optimizer=optimizer)

    def autoencoder(self):
        inp = Input(shape=self.img_shape)
        # encoder: 32x32x3 -> 2x2x512
        h = Conv2D(64, (5, 5), strides=2, padding='same', activation='relu')(inp)
        h = Conv2D(128, (5, 5), strides=2, padding='same', activation='relu')(h)
        h = Conv2D(256, (5, 5), strides=2, padding='same', activation='relu')(h)
        encoded = Conv2D(512, (5, 5), strides=2, padding='same', activation='relu')(h)

        # decoder: 2x2x512 -> 32x32x3, tanh output in [-1, 1]
        h = Deconv2D(512, (5, 5), strides=2, padding='same', activation='relu')(encoded)
        h = Deconv2D(256, (5, 5), strides=2, padding='same', activation='relu')(h)
        h = Deconv2D(128, (5, 5), strides=2, padding='same', activation='relu')(h)
        decoded = Deconv2D(3, (5, 5), strides=2, padding='same', activation='tanh')(h)

        auto_encoder = Model(inp, decoded)
        auto_encoder.summary()

        return auto_encoder

    def discriminator(self):
        inp = Input(shape=self.img_shape)
        h = Conv2D(64, (5, 5), strides=2, padding='same', activation='relu')(inp)
        h = Conv2D(128, (5, 5), strides=2, padding='same', activation='relu')(h)
        h = Conv2D(256, (5, 5), strides=2, padding='same', activation='relu')(h)
        h = Conv2D(512, (5, 5), strides=2, padding='same', activation='relu')(h)
        h = Flatten()(h)
        # sigmoid output for the binary secret-vs-public decision
        output_secret = Dense(1, activation='sigmoid')(h)
        discriminator = Model(inp, output_secret)
        discriminator.summary()

        return discriminator

    def train(self, epochs, batch_size=128, sample_interval=50):
        # (sample_interval is currently unused; sampling happens every 20 epochs)
        x_train_public, y_train_public, _, _, \
            x_train_secret, y_train_secret, _, _ = load_data.load_cifar10()

        # attack-network targets: secret images -> 1, public images -> 0
        label_secret = np.ones(shape=(batch_size, 1))
        label_public = np.zeros(shape=(batch_size, 1))

        for epoch in range(1, epochs + 1):
            start = time.time()
            print("In the epoch ", epoch, "/", epochs)

            # sample a batch of public images and their obfuscated versions
            idx_public = random.sample(range(0, x_train_public.shape[0]), batch_size)
            image_batch_public = x_train_public[idx_public, :, :, :]
            generated_images_public = self.ae.predict(image_batch_public)

            # sample a batch of secret images and their obfuscated versions
            idx_secret = random.sample(range(0, x_train_secret.shape[0]), batch_size)
            image_batch_secret = x_train_secret[idx_secret, :, :, :]
            generated_images_secret = self.ae.predict(image_batch_secret)

            # train the attack network to spot the secret in both real and generated images
            l1 = self.attack.train_on_batch(image_batch_public, label_public)
            l2 = self.attack.train_on_batch(generated_images_public, label_public)
            l3 = self.attack.train_on_batch(image_batch_secret, label_secret)
            l4 = self.attack.train_on_batch(generated_images_secret, label_secret)

            # train the autoencoder to reconstruct the input while making the
            # attack network predict "public" for every generated image
            g_loss1 = self.combined_model.train_on_batch(image_batch_public,
                                                         [label_public, image_batch_public])
            g_loss2 = self.combined_model.train_on_batch(image_batch_secret,
                                                         [label_public, image_batch_secret])

            print("Epoch ", epoch, "took time", time.time() - start)
            if epoch % 20 == 0:
                self.save_model(epoch)
                self.sample_images(image_batch_secret[0], epoch, 'secret')
                self.sample_images(image_batch_public[0], epoch, 'public')

    def sample_images(self, image, epoch, label):
        image = np.expand_dims(image, axis=0)
        gen_imgs = self.ae.predict(image)  # autoencoder output pixels are in (-1, 1)

        # rescale to [0, 255] and save as a PNG
        gen_imgs = 127.5 * gen_imgs + 127.5
        data = gen_imgs[0].astype(np.uint8)

        output_path = './images_vaegan_' + label + '/'
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        img = Image.fromarray(data, 'RGB')
        img.save(output_path + "%d.png" % epoch)

    def save_model(self, epoch):

        def save(model, epoch, model_name):
            output_path = './models_vaegan/'
            if not os.path.exists(output_path):
                os.mkdir(output_path)
            model_path = output_path + str(epoch) + "_" + model_name + ".h5"
            model.save(model_path)

        save(self.ae, epoch, "autoencoder")
        # save(self.attack, epoch, "discriminator")

if __name__ == '__main__':
    model = ae_gan()
    model.train(epochs=1000, batch_size=32, sample_interval=200)

