Deep neural network dataset size

problem description

I only have a little over a thousand samples. Is that too few to train a deep neural network? Does it make the model prone to underfitting?

background of the problem and what methods you have tried

my training code is based on the two-convolutional-layer CIFAR example, and I tested the results with 1000 iterations at a batch size of 10

related code

import cv2
import numpy as np
import os
import random
import tensorflow as tf
import sklearn.utils

def read_and_decode(filename, testing=False):
    # Read one serialized example from the TFRecord file and decode it into
    # a normalized [600, 328, 1] grayscale image and an int32 label.
    filename_queue = tf.train.string_input_producer([filename])

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    if not testing:
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               "label": tf.FixedLenFeature([], tf.int64),
                                               "img_raw": tf.FixedLenFeature([], tf.string),
                                           })
        img = tf.decode_raw(features["img_raw"], tf.uint8)
        img = tf.reshape(img, [600, 328, 1])
        img = tf.cast(img, tf.float32) * (1. / 255) - 0.5   # scale pixels to [-0.5, 0.5]
        label = tf.cast(features["label"], tf.int32)

        return img, label
    else:
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               "label_test": tf.FixedLenFeature([], tf.int64),
                                               "img_raw_test": tf.FixedLenFeature([], tf.string),
                                           })
        img = tf.decode_raw(features["img_raw_test"], tf.uint8)
        img = tf.reshape(img, [600, 328, 1])
        img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
        label = tf.cast(features["label_test"], tf.int32)

        return img, label

if name ="_ _ main__":

img, label = read_and_decode("train.tfrecords")


img_train, label_train = tf.train.shuffle_batch([img, label],
                                                batch_size=10, capacity=2000,
                                                min_after_dequeue=1000)

img_raw_test, label_test = read_and_decode("test.tfrecords", testing = True)

img_test, label_test = tf.train.shuffle_batch([img_raw_test, label_test],
                                                 batch_size=10, capacity=2000,
                                                 min_after_dequeue=1000)


print("begin")

print("begin data")

    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")

    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    def avg_pool_82x150(x):
        return tf.nn.avg_pool(x, ksize=[1, 150, 82, 1], strides=[1, 150, 82, 1], padding="SAME")


    x = tf.placeholder(tf.float32, [None, 600, 328, 1])
    y = tf.placeholder(tf.float32, [None, 6])

    W_conv1 = weight_variable([5, 5, 1, 64])
    b_conv1 = bias_variable([64])

    x_image = tf.reshape(x, [-1, 600, 328, 1])

    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    W_conv2 = weight_variable([5, 5, 64, 64])
    b_conv2 = bias_variable([64])

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    W_conv3 = weight_variable([5, 5, 64, 6])
    b_conv3 = bias_variable([6])
    h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

    # Global average pooling over the 150x82 feature map, then softmax over 6 classes.
    nt_hpool3 = avg_pool_82x150(h_conv3)
    nt_hpool3_flat = tf.reshape(nt_hpool3, [-1, 6])
    y_conv = tf.nn.softmax(nt_hpool3_flat)


    cross_entropy = -tf.reduce_sum(y * tf.log(y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
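    # Side note (not in the original code): softmax followed by a hand-written
    # log cross-entropy can hit log(0) once a probability saturates; a numerically
    # safer alternative would be something like:
    # cross_entropy = tf.reduce_mean(
    #     tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=nt_hpool3_flat))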

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))


    # Start the session and the input queue threads.
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    tf.train.start_queue_runners(sess=sess)

    for i in range(1000):
        image_batch, label_batch = sess.run([img_train, label_train])
        label_b = np.eye(6, dtype=float)[label_batch]   # one-hot encode the labels
        train_step.run(feed_dict={x: image_batch, y: label_b}, session=sess)

        if i % 20 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: image_batch, y: label_b}, session=sess)
            print("step %d, training accuracy %g" % (i, train_accuracy))

    # Evaluate on a single test batch.
    image_batch, label_batch = sess.run([img_test, label_test])
    label_b = np.eye(6, dtype=float)[label_batch]
    print("finished! test accuracy %g" % accuracy.eval(feed_dict={x: image_batch, y: label_b}, session=sess))

what result do you expect? What error message do you actually see?

You can see that the generalization ability here is still quite weak. Is the model underfitting, and is the small dataset a fundamental limitation?
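For context, when a dataset of roughly a thousand images is the limiting factor, one common first step is on-the-fly augmentation in the input pipeline. The following is only a sketch, under the assumption that mirrored or slightly brightness-jittered versions of these 600x328 grayscale images are still valid samples; the parameters are illustrative, not tuned:

# Augment each decoded training image before batching (sketch, not from the original code).
img, label = read_and_decode("train.tfrecords")
img = tf.image.random_flip_left_right(img)               # random horizontal mirror
img = tf.image.random_brightness(img, max_delta=0.1)     # small brightness jitter
img = tf.image.random_contrast(img, lower=0.9, upper=1.1)
img_train, label_train = tf.train.shuffle_batch([img, label],
                                                batch_size=10, capacity=2000,
                                                min_after_dequeue=1000)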

Jun.07,2021