diegoalejogm / gans
Generative Adversarial Networks implemented in PyTorch and TensorFlow
License: MIT License
When running the notebook I get this error; any ideas?

from utils import Logger
ImportError: cannot import name 'Logger'

Installed: utils==0.9.0

$ python --version
Python 3.6.4 :: Anaconda, Inc.
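(A hedged guess, not confirmed by the author: Logger lives in the repository's own utils.py, so the pip package utils==0.9.0 may shadow it when the notebook is started outside the repo directory. A minimal workaround sketch, with the clone path as a placeholder:)

import sys
sys.path.insert(0, '/path/to/gans')  # placeholder: the directory that contains the repo's utils.py
from utils import Logger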
In the discriminator:

with tf.variable_scope("conv4"):
    conv4 = default_conv2d(conv3, 1024)
    conv4 = layers.batch_normalization(conv3)  # shouldn't this be conv4 instead of conv3?
    conv4 = nn.leaky_relu(conv3, alpha=0.2)
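(A sketch of the block with the change the question suggests applied, i.e. each call chained from conv4 rather than conv3; this is the reporter's reading of the code, not a confirmed patch:)

with tf.variable_scope("conv4"):
    conv4 = default_conv2d(conv3, 1024)        # default_conv2d is the repo's own helper
    conv4 = layers.batch_normalization(conv4)  # chain from conv4, not conv3
    conv4 = nn.leaky_relu(conv4, alpha=0.2)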
When trying to run 1. Vanilla GAN PyTorch.ipynb on my laptop, I got this error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-69-81fdb6b15e61> in <module>()
19 g_error = train_generator(g_optimizer, fake_data)
20 # Log error
---> 21 logger.log(d_error, g_error, epoch, n_batch, num_batches)
22
23 # Display Progress
~/gans/utils.py in log(self, d_error, g_error, epoch, n_batch, num_batches)
AttributeError: 'function' object has no attribute 'Variable'
You probably forgot the parentheses there:
if torch.cuda.is_available(): return n.cuda() <==
What is the utils.py file? I thought it was the generator/discriminator training model, but I'm getting an error in it. Can you please help me?
from __future__ import print_function, division
#from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from skimage.io import imread_collection
import cv2
import matplotlib.pyplot as plt
import random
import sys
import numpy as np
class GAN():
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise as input and generates imgs
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        validity = self.discriminator(img)

        # The combined model (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def build_generator(self):
        model = Sequential()
        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))
        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)
        return Model(noise, img)

    def build_discriminator(self):
        model = Sequential()
        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)
        return Model(img, validity)

    def train(self, epochs, batch_size=128, sample_interval=50):
        # Load the dataset
        #(X_train, _), (_, _) = mnist.load_data()
        col_dir = 'C:/Users/Ajaykumar/Data_set/hack/dataset_2/Images/*.jpg'
        # Creating a collection with the available images
        col = imread_collection(col_dir)
        all_images = np.zeros((450, 64, 64, 3))  # 64*64 number of pixels
        X = np.zeros((450, 64, 64, 3))
        for i in range(450):
            #if i % 10 == 0:
            #    print(i)
            var = cv2.resize(col[i], (64, 64))
            #print(var)
            all_images[i, :, :] = var
            all_images[i, :, :] = all_images[i, :, :]
            #X_train[i,:,:] = all_images[i,:,:]
            #print(np.shape(X_train))

        # Rescale -1 to 1
        all_images = all_images / 127.5 - 1.
        all_images = np.expand_dims(all_images, axis=3)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):
            # ---------------------
            # Train Discriminator
            # ---------------------

            # Select a random batch of images
            #random.sample(range(0, dataset.shape[0]), n_samples)
            idx = random.sample(range(0, all_images.shape[0]), batch_size)
            imgs = all_images[idx]

            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # Generate a batch of new images
            gen_imgs = self.generator.predict(noise)

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            # Train Generator
            # ---------------------

            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # Train the generator (to have the discriminator label samples as valid)
            g_loss = self.combined.train_on_batch(noise, valid)

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch)

    def sample_images(self, epoch):
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("C:/Users/Ajaykumar/Data_set/hack/images/aa/%d.png" % epoch)
        plt.close()

if __name__ == '__main__':
    gan = GAN()
    gan.train(epochs=30000, batch_size=32, sample_interval=200)
Hi, I'm getting an error while running your Vanilla GAN PyTorch.ipynb notebook.
RuntimeError Traceback (most recent call last)
in <module>
2
3 for epoch in range(num_epochs):
----> 4 for n_batch, (real_batch,_) in enumerate(data_loader):
5
6 # 1. Train Discriminator
C:\anaconda3\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
613 if self.num_workers == 0: # same-process loading
614 indices = next(self.sample_iter) # may raise StopIteration
--> 615 batch = self.collate_fn([self.dataset[i] for i in indices])
616 if self.pin_memory:
617 batch = pin_memory_batch(batch)
C:\anaconda3\lib\site-packages\torch\utils\data\dataloader.py in <listcomp>(.0)
613 if self.num_workers == 0: # same-process loading
614 indices = next(self.sample_iter) # may raise StopIteration
--> 615 batch = self.collate_fn([self.dataset[i] for i in indices])
616 if self.pin_memory:
617 batch = pin_memory_batch(batch)
C:\anaconda3\lib\site-packages\torchvision\datasets\mnist.py in __getitem__(self, index)
93
94 if self.transform is not None:
---> 95 img = self.transform(img)
96
97 if self.target_transform is not None:
C:\anaconda3\lib\site-packages\torchvision\transforms\transforms.py in __call__(self, img)
58 def __call__(self, img):
59 for t in self.transforms:
---> 60 img = t(img)
61 return img
62
C:\anaconda3\lib\site-packages\torchvision\transforms\transforms.py in __call__(self, tensor)
161 Tensor: Normalized Tensor image.
162 """
--> 163 return F.normalize(tensor, self.mean, self.std, self.inplace)
164
165 def __repr__(self):
C:\anaconda3\lib\site-packages\torchvision\transforms\functional.py in normalize(tensor, mean, std, inplace)
206 mean = torch.tensor(mean, dtype=torch.float32)
207 std = torch.tensor(std, dtype=torch.float32)
--> 208 tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
209 return tensor
210
RuntimeError: output with shape [1, 28, 28] doesn't match the broadcast shape [3, 28, 28]
Packages installed:
torch == 1.0.1
tensorflow == 1.13.1
tensorboardX == 1.6
numpy == 1.16.2
matplotlib == 3.0.3
jupyter == 1.0.0
Is this caused by running the script on newer versions of the packages? Sorry, I'm quite new to these techniques.
The package versions pinned in requirements.txt are no longer available.
Thanks very much, your time is appreciated!
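(For reference, this particular RuntimeError usually appears when a three-channel Normalize is applied to single-channel MNIST tensors under newer torchvision; a sketch of the data-loading function using single-channel statistics, in the same form used later in this thread, is below. Treat it as a guess, not the author's confirmed fix.)

from torchvision import transforms, datasets

def mnist_data(out_dir='./torch_data/VGAN/MNIST/dataset'):
    # MNIST images have a single channel, so Normalize gets one mean and one std
    compose = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])
    ])
    return datasets.MNIST(root=out_dir, train=True, transform=compose, download=True)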
Hi there. I'm very interested in creating GANs but have been unable to find much information, so I was hoping you could help answer some things. Just a couple of quick questions.
1. You don't seem to use tf.contrib.gan. Can you briefly talk about why? Is it too restrictive?
2. Could this work on non-image data, e.g. by changing IMAGE_SIZE to be the number of floats and supplying appropriate training data (from, say, a CSV)?
Thanks very much, your time is appreciated!
Hi, I have tried to run your code, but in the optimizer part it shows the error 'Tensor' object is not callable. I don't know what has gone wrong with it.
I have a question: at the end of the training phase we have both a generator and a discriminator. Now I only want to test the discriminator. How can I do that?
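(A minimal sketch of how one might probe just the trained discriminator, assuming the DiscriminatorNet, images_to_vectors, and data_loader definitions from the PyTorch notebook; this is a suggestion, not a documented procedure from the repository:)

import torch

discriminator.eval()  # switch off dropout for evaluation

with torch.no_grad():
    real_batch, _ = next(iter(data_loader))  # one batch of real images
    if torch.cuda.is_available():
        real_batch = real_batch.cuda()       # keep the batch on the same device as the model
    scores = discriminator(images_to_vectors(real_batch))
    # Each score lies in (0, 1): close to 1 means "judged real", close to 0 means "judged fake"
    print(scores.squeeze())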
When I clone master and run it as per the instructions here: https://mmf.readthedocs.io/en/latest/notes/installation.html
I see the following test failures with pytest:
======================== short test summary info ========================
FAILED tests/modules/test_layers.py::TestModuleLayers::test_bert_classifier_head
FAILED tests/modules/test_layers.py::TestModuleLayers::test_mlp - Asse...
FAILED tests/utils/test_checkpoint.py::TestUtilsCheckpoint::test_save_and_load_state_dict
For test_checkpoint.py: E RuntimeError: Expected object of device type cuda but got device type cpu for argument #2 'other' in call to _th_equal
For other cases, I see errors like this:
> output.squeeze().tolist(), [0.5452202, -0.0437842, -0.377468], decimal=3
)
E AssertionError:
E Arrays are not almost equal to 3 decimals
E
E Mismatched elements: 3 / 3 (100%)
E Max absolute difference: 0.45450824
E Max relative difference: 10.38064516
E x: array([ 0.741, 0.411, -0.254])
E y: array([ 0.545, -0.044, -0.377])
tests/modules/test_layers.py:114: AssertionError
Can you train both the Vanilla GAN and the DCGAN networks with your own dataset, or with a dataset like ImageNet for example? If so, how would you do it, for both PyTorch and TensorFlow?
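(Not an official answer, but in PyTorch a common approach is to swap the MNIST dataset for torchvision's ImageFolder and keep the rest of the pipeline; a minimal sketch, where the folder path and the grayscale 28x28 resizing are assumptions chosen to match the vanilla notebook's 784-dimensional input:)

from torchvision import transforms, datasets
import torch

def custom_data(root='./my_images'):  # placeholder path; expects root/<class>/<image files>
    compose = transforms.Compose([
        transforms.Grayscale(),        # drop this to keep RGB (the model input size must then change too)
        transforms.Resize((28, 28)),   # match the notebook's 28x28 inputs
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])
    ])
    return datasets.ImageFolder(root=root, transform=compose)

data_loader = torch.utils.data.DataLoader(custom_data(), batch_size=100, shuffle=True)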
Hello,
Is it possible to modify your model to make it work with text data? I am having difficulty finding data in French for a text classification task, and I was wondering whether a GAN could help augment my small dataset.
Best regards,
Hello Everyone!
I have two problems when I run this whole script with my own path to the MNIST data.
My data folder path is "C:\Users\muzam\Anaconda3\envs\tensorflow\Data_Practice\train-images-idx3-ubyte\dataset\MNIST".
When I run the "Training code" cell in Jupyter:
logger = Logger(model_name='VGAN', data_name='MNIST')
num_epochs = 20
for epoch in range(num_epochs):
    for n_batch, (real_batch,_) in enumerate(data_loader):
        N = real_batch.size(0)

        # 1. Train Discriminator
        real_data = Variable(images_to_vectors(real_batch))
        # Generate fake data and detach
        # (so gradients are not calculated for generator)
        fake_data = generator(noise(N)).detach()
        # Train D
        d_error, d_pred_real, d_pred_fake = \
            train_discriminator(d_optimizer, real_data, fake_data)

        # 2. Train Generator
        # Generate fake data
        fake_data = generator(noise(N))
        # Train G
        g_error = train_generator(g_optimizer, fake_data)

        # Log batch error
        logger.log(d_error, g_error, epoch, n_batch, num_batches)

        # Display Progress every few batches
        if (n_batch) % 100 == 0:
            test_images = vectors_to_images(generator(test_noise))
            test_images = test_images.data
            logger.log_images(
                test_images, num_test_samples,
                epoch, n_batch, num_batches
            )
            # Display status Logs
            logger.display_status(
                epoch, num_epochs, n_batch, num_batches,
                d_error, g_error, d_pred_real, d_pred_fake
            )
First it gives an error saying that my kernel has died.
Then, once the kernel is idle again and I re-run the cell, I get the error "name 'Logger' is not defined", as shown below.
It also creates folders under data (with subdirectories VGAN and MNIST), but those folders are empty.
I don't know what the problem is; can anyone tell me the solution?
Thank you very much for your time and consideration. I am waiting for your reply.
Regards,
Muhammad Muzammil Mukhtar
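(One hedged guess for the "name 'Logger' is not defined" message: after the kernel dies and restarts, the cell that imports Logger from the repository's local utils.py may not have been re-run. Assuming the notebook is launched from the repository root, re-running these lines before the training cell should restore the name:)

from utils import Logger  # Logger is defined in the repo's utils.py
logger = Logger(model_name='VGAN', data_name='MNIST')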
I am trying to run this program, but it returns:
RuntimeError: Expected object of device type cuda but got device type cpu for argument #2 'mat1' in call to _th_addmm
Note: I am using the notebook file as an actual Python file.
from IPython import display
from utils import Logger
import torch
from torch import nn, optim
from torch.autograd.variable import Variable
from torchvision import transforms, datasets
DATA_FOLDER = './torch_data/VGAN/MNIST'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(torch.cuda.is_available(), device)
def mnist_data():
    compose = transforms.Compose([
        transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
    out_dir = '{}/dataset'.format(DATA_FOLDER)
    return datasets.MNIST(root=out_dir, train=True, transform=compose, download=True)
# Load data
data = mnist_data()
# Create loader with data, so that we can iterate over it
data_loader = torch.utils.data.DataLoader(data, batch_size=100, shuffle=True)
# Num batches
num_batches = len(data_loader)
class DiscriminatorNet(torch.nn.Module):
    """
    A three hidden-layer discriminative neural network
    """
    def __init__(self):
        super(DiscriminatorNet, self).__init__()
        n_features = 784
        n_out = 1

        self.hidden0 = nn.Sequential(
            nn.Linear(n_features, 1024),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3)
        )
        self.hidden1 = nn.Sequential(
            nn.Linear(1024, 512),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3)
        )
        self.hidden2 = nn.Sequential(
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3)
        )
        self.out = nn.Sequential(
            torch.nn.Linear(256, n_out),
            torch.nn.Sigmoid()
        )

    def forward(self, x):
        x = self.hidden0(x)
        x = self.hidden1(x)
        x = self.hidden2(x)
        x = self.out(x)
        return x
class GeneratorNet(torch.nn.Module):
    """
    A three hidden-layer generative neural network
    """
    def __init__(self):
        super(GeneratorNet, self).__init__()
        n_features = 100
        n_out = 784

        self.hidden0 = nn.Sequential(
            nn.Linear(n_features, 256),
            nn.LeakyReLU(0.2)
        )
        self.hidden1 = nn.Sequential(
            nn.Linear(256, 512),
            nn.LeakyReLU(0.2)
        )
        self.hidden2 = nn.Sequential(
            nn.Linear(512, 1024),
            nn.LeakyReLU(0.2)
        )
        self.out = nn.Sequential(
            nn.Linear(1024, n_out),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.hidden0(x)
        x = self.hidden1(x)
        x = self.hidden2(x)
        x = self.out(x)
        return x
def images_to_vectors(images):
    return images.view(images.size(0), 784)

def vectors_to_images(vectors):
    return vectors.view(vectors.size(0), 1, 28, 28)

def noise(size):
    '''
    Generates a 1-d vector of gaussian sampled random values
    '''
    n = Variable(torch.randn(size, 100))
    return n
discriminator = DiscriminatorNet()
generator = GeneratorNet()
if torch.cuda.is_available():
    discriminator.cuda()
    generator.cuda()
# Optimizers
d_optimizer = optim.Adam(discriminator.parameters(), lr=0.0002)
g_optimizer = optim.Adam(generator.parameters(), lr=0.0002)
# Loss function
loss = nn.BCELoss()
# Number of steps to apply to the discriminator
d_steps = 1 # In Goodfellow et. al 2014 this variable is assigned to 1
# Number of epochs
num_epochs = 200
def real_data_target(size):
    '''
    Tensor containing ones, with shape = size
    '''
    data = Variable(torch.ones(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data

def fake_data_target(size):
    '''
    Tensor containing zeros, with shape = size
    '''
    data = Variable(torch.zeros(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data
def train_discriminator(optimizer, real_data, fake_data):
    # Reset gradients
    optimizer.zero_grad()

    # 1.1 Train on Real Data
    prediction_real = discriminator(real_data)
    # Calculate error and backpropagate
    error_real = loss(prediction_real, real_data_target(real_data.size(0)))
    error_real.backward()

    # 1.2 Train on Fake Data
    prediction_fake = discriminator(fake_data)
    # Calculate error and backpropagate
    error_fake = loss(prediction_fake, fake_data_target(real_data.size(0)))
    error_fake.backward()

    # 1.3 Update weights with gradients
    optimizer.step()

    # Return error
    return error_real + error_fake, prediction_real, prediction_fake

def train_generator(optimizer, fake_data):
    # 2. Train Generator
    # Reset gradients
    optimizer.zero_grad()
    # Sample noise and generate fake data
    prediction = discriminator(fake_data)
    # Calculate error and backpropagate
    error = loss(prediction, real_data_target(prediction.size(0)))
    error.backward()
    # Update weights with gradients
    optimizer.step()
    # Return error
    return error
num_test_samples = 16
test_noise = noise(num_test_samples)
logger = Logger(model_name='VGAN', data_name='MNIST')
for epoch in range(num_epochs):
    for n_batch, (real_batch,_) in enumerate(data_loader):

        # 1. Train Discriminator
        real_data = Variable(images_to_vectors(real_batch))
        if torch.cuda.is_available(): real_data = real_data.cuda()
        # Generate fake data
        fake_data = generator(noise(real_data.size(0))).detach()
        # Train D
        d_error, d_pred_real, d_pred_fake = train_discriminator(d_optimizer,
                                                                real_data, fake_data)

        # 2. Train Generator
        # Generate fake data
        fake_data = generator(noise(real_batch.size(0)))
        # Train G
        g_error = train_generator(g_optimizer, fake_data)
        # Log error
        logger.log(d_error, g_error, epoch, n_batch, num_batches)

        # Display Progress
        if (n_batch) % 100 == 0:
            display.clear_output(True)
            # Display Images
            test_images = vectors_to_images(generator(test_noise)).data.cpu()
            logger.log_images(test_images, num_test_samples, epoch, n_batch, num_batches)
            # Display status Logs
            logger.display_status(
                epoch, num_epochs, n_batch, num_batches,
                d_error, g_error, d_pred_real, d_pred_fake
            )
        # Model Checkpoints
        logger.save_models(generator, discriminator, epoch)
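(For the cuda/cpu RuntimeError above, one hedged guess, echoing the earlier reply in this thread, is that noise() returns a CPU tensor while the generator has been moved to the GPU; a device-aware variant of the helper is sketched below. This is an assumption, not a confirmed fix from the repository.)

def noise(size):
    '''
    Generates a batch of Gaussian noise vectors, moved to the GPU when available
    '''
    n = Variable(torch.randn(size, 100))
    if torch.cuda.is_available(): return n.cuda()
    return n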