Now that I have finished classifying my photo collection with an AlexNet network, I've noticed that TensorBoard's embedding projector looks very useful, and I would especially like to inspect my network's classification results through that kind of visual interface. But I can't get the embedding to work, and I'd really appreciate some advice from an expert.
In my network I first save the image set to a .npy file and then sample from it randomly during training. It isn't clear to me where in the code the embedding should be set up. I want to write my original data, i.e. the images themselves, into an embedding to see the effect, but I keep running into problems when loading it. I hope someone can help!
The code is as follows:
import os
import random

import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

def alexnet_main():
    loopNum = 5
    # load the {image path: label} dictionary saved earlier
    files = np.load("label.npy", encoding="bytes")[()]
    # embedding: write one metadata row per image
    log_dir = "model"
    metadata = os.path.join(log_dir, "metadata.tsv")
    j = 0
    with open(metadata, "w") as metadata_file:
        for i in files:
            metadata_file.write("%d\n" % j)
            j = j + 1
    # the image paths are the dictionary keys
    keys = [i for i in files]
    myinput = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3], name="input")
    mylabel = tf.placeholder(dtype=tf.float32, shape=[None, 10], name="label")
    # keep_prob = 0.6
    myoutput = alexnet(myinput, 0.6)
    # loss
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=myoutput, labels=mylabel))
    # learning rate 0.09
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.09).minimize(loss)
    # validation accuracy
    valaccuracy = tf.reduce_mean(
        tf.cast(
            tf.equal(
                tf.argmax(myoutput, 1),
                tf.argmax(mylabel, 1)),
            tf.float32))
    # TensorFlow saver
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    all_vars = tf.global_variables()

    with tf.Session() as sess:
        sess.run(init)
        saver = tf.train.Saver(all_vars)
        saver.restore(sess, r"model/model.ckpt")
        # train for loopNum epochs
        totalAcc = 0
        for loop in range(loopNum):
            # reshuffle the image indices each epoch
            indices = np.arange(1100)  # modify to match the dataset size
            random.shuffle(indices)
            # batch size 50
            # 1100 images in total: 1000 for training, 100 for validation
            for i in range(0, 0 + 1000, 50):
                photo = []
                label = []
                # print("1:", label)
                for j in range(0, 20):
                    photo.append(cv2.resize(cv2.imread(keys[indices[i + j]]), (224, 224)) / 225)
                    # print(i + j)
                    label.append(files[keys[indices[i + j]]])
                # print("2:", label)
                # embedding: this is the part I am unsure about
                target = tf.convert_to_tensor(photo)
                embedding_var = tf.Variable(photo, "data_embedding")
                config = projector.ProjectorConfig()
                embedding = config.embeddings.add()
                embedding.tensor_name = embedding_var.name
                embedding.metadata_path = metadata
                embedding.sprite.single_image_dim.extend([28, 28])
                projector.visualize_embeddings(tf.summary.FileWriter(log_dir), config)
                m = getOneHotLabel(label, depth=10)
                a, b = sess.run([optimizer, loss], feed_dict={myinput: photo, mylabel: m})
            acc = 0
            # validation: 5 batches of 5 images from the last 100
            for i in range(1000, 1000 + 100, 20):
                photo = []
                label = []
                for j in range(i, i + 5):
                    photo.append(cv2.resize(cv2.imread(keys[indices[j]]), (224, 224)) / 225)
                    label.append(files[keys[indices[j]]])
                m = getOneHotLabel(label, depth=10)
                acc += sess.run(valaccuracy, feed_dict={myinput: photo, mylabel: m})
            # average over the 5 validation batches
            print("Epoch ", loop, ": validation rate: ", acc / 5)
            totalAcc += acc / 5
        print("final ", totalAcc / loopNum)
        # save the model and build the sprite image
        saver.save(sess, "model/model.ckpt")
        to_visualise = myinput
        to_visualise = vector_to_mnist(to_visualise)
        to_visualise = invert_grayscale(to_visualise)
        sprite_image = create_sprite_image(to_visualise)
        plt.imsave(metadata, sprite_image)
        plt.imshow(sprite_image)

if __name__ == "__main__":
    alexnet_main()
where alexnet is my network definition function (not shown here).
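For reference, here is a minimal sketch of how I understand the TF 1.x projector is meant to be wired up, based on the TensorBoard docs: the embedding variable is created once from all the data (outside the batch loop), a checkpoint containing it is saved into the same log directory that the ProjectorConfig is written to, and the metadata/sprite paths point into that directory. The function name, file names and 28x28 sprite size below are only placeholders I made up, not tested code:

import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

def write_image_embedding(images, log_dir):
    # flatten every image to one row so the projector sees an [N, D] matrix
    data = np.asarray(images, dtype=np.float32).reshape(len(images), -1)

    # create the embedding variable once, outside any training loop
    embedding_var = tf.Variable(data, name="data_embedding")

    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    # metadata.tsv and sprite.png are expected to live inside log_dir
    embedding.metadata_path = "metadata.tsv"
    embedding.sprite.image_path = "sprite.png"
    embedding.sprite.single_image_dim.extend([28, 28])

    # write projector_config.pbtxt into log_dir
    writer = tf.summary.FileWriter(log_dir)
    projector.visualize_embeddings(writer, config)

    # the projector reads the variable from a checkpoint, so one must be saved
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver([embedding_var])
        saver.save(sess, os.path.join(log_dir, "embedding.ckpt"))

If that understanding is right, the main differences from my code above are that the variable is not recreated for every batch and that the saved checkpoint and projector_config.pbtxt end up in the same directory, so running tensorboard --logdir model afterwards should show the Projector tab. Is that the correct way to do it, or should the embedding be hooked into the training session itself?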