Converting CSV files to TFRecords


I have had this script running for more than 5 hours now. I have 258 CSV files that I want to convert to TFRecords. This is the script I wrote, and as I said, it has already been running for over 5 hours:

import argparse
import os
import sys
import standardize_data
import tensorflow as tf

FLAGS = None
PATH = '/home/darth/GitHub Projects/gru_svm/dataset/train'

def _int64_feature(value):
    """Wraps an int in the Int64List feature proto used by tf.train.Example."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _float_feature(value):
    """Wraps a float in the FloatList feature proto used by tf.train.Example."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

def convert_to(dataset, name):
    """Converts a dataset to tfrecords"""

    # queue that feeds the CSV file names to the reader
    filename_queue = tf.train.string_input_producer(dataset)

    # TextLineReader emits one CSV row per read
    reader = tf.TextLineReader()

    # default values, in case of empty columns (24 columns per row)
    record_defaults = [[0.0] for _ in range(24)]

    key, value = reader.read(filename_queue)

    duration, service, src_bytes, dest_bytes, count, same_srv_rate, \
    serror_rate, srv_serror_rate, dst_host_count, dst_host_srv_count, \
    dst_host_same_src_port_rate, dst_host_serror_rate, dst_host_srv_serror_rate, \
    flag, ids_detection, malware_detection, ashula_detection, label, src_ip_add, \
    src_port_num, dst_ip_add, dst_port_num, start_time, protocol = \
    tf.decode_csv(value, record_defaults=record_defaults)

    # stack every decoded column except the label into one feature tensor
    features = tf.stack([duration, service, src_bytes, dest_bytes, count, same_srv_rate,
                        serror_rate, srv_serror_rate, dst_host_count, dst_host_srv_count,
                        dst_host_same_src_port_rate, dst_host_serror_rate, dst_host_srv_serror_rate,
                        flag, ids_detection, malware_detection, ashula_detection, src_ip_add,
                        src_port_num, dst_ip_add, dst_port_num, start_time, protocol])

    filename = os.path.join(FLAGS.directory, name + '.tfrecords')
    print('Writing {}'.format(filename))
    writer = tf.python_io.TFRecordWriter(filename)
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            while not coord.should_stop():
                # fetch key in the same run call: a separate sess.run(key)
                # would trigger another read and silently skip a row
                example, l, k = sess.run([features, label, key])
                print('Writing {dataset} : {example}, {label}'.format(dataset=k,
                        example=example, label=l))
                example_to_write = tf.train.Example(features=tf.train.Features(feature={
                    'duration' : _float_feature(example[0]),
                    'service' : _int64_feature(int(example[1])),
                    'src_bytes' : _float_feature(example[2]),
                    'dest_bytes' : _float_feature(example[3]),
                    'count' : _float_feature(example[4]),
                    'same_srv_rate' : _float_feature(example[5]),
                    'serror_rate' : _float_feature(example[6]),
                    'srv_serror_rate' : _float_feature(example[7]),
                    'dst_host_count' : _float_feature(example[8]),
                    'dst_host_srv_count' : _float_feature(example[9]),
                    'dst_host_same_src_port_rate' : _float_feature(example[10]),
                    'dst_host_serror_rate' : _float_feature(example[11]),
                    'dst_host_srv_serror_rate' : _float_feature(example[12]),
                    'flag' : _int64_feature(int(example[13])),
                    'ids_detection' : _int64_feature(int(example[14])),
                    'malware_detection' : _int64_feature(int(example[15])),
                    'ashula_detection' : _int64_feature(int(example[16])),
                    'label' : _int64_feature(int(l)),
                    'src_ip_add' : _float_feature(example[17]),
                    'src_port_num' : _float_feature(example[18]),
                    'dst_ip_add' : _float_feature(example[19]),
                    'dst_port_num' : _float_feature(example[20]),
                    'start_time' : _float_feature(example[21]),
                    'protocol' : _int64_feature(int(example[22])),
                    }))
                writer.write(example_to_write.SerializeToString())
        except tf.errors.OutOfRangeError:
            print('Done converting -- EOF reached.')
        finally:
            coord.request_stop()
            # close the writer in `finally` so it also runs when the loop
            # exits via OutOfRangeError instead of a normal return
            writer.close()

        coord.join(threads)

def main(unused_argv):
    files = standardize_data.list_files(path=PATH)

    convert_to(dataset=files, name='train')
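
(Not shown above: the script's entry point, which never appears in the listing even though argparse is imported and FLAGS.directory is read inside convert_to(). It presumably looks something like the following sketch, where the --directory flag name and its default are assumptions inferred from the FLAGS.directory lookup, not the author's code.)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # flag name inferred from the FLAGS.directory lookup in convert_to()
    parser.add_argument('--directory', type=str, default='/tmp/data',
                        help='directory where the .tfrecords file is written')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)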

It has occurred to me: maybe it is stuck in an infinite loop? What I am trying to do is read all the rows of every CSV file (all 258 of them) and write those rows to a TFRecord (a feature and a label, of course), then stop the loop once no more rows are available, that is, once the CSV files have been exhausted.
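
(For reference: in TF 1.x, tf.train.string_input_producer cycles through the file list indefinitely unless num_epochs is given, so a queue bounded to one pass will raise OutOfRangeError at end-of-input. The sketch below is illustrative only and assumes the same dataset list of CSV paths; it is not the script above.)

import tensorflow as tf

def count_rows(dataset):
    """Read every line of every CSV in `dataset` exactly once, then stop."""
    # num_epochs=1 bounds the queue: after one pass over the file list
    # the reader raises OutOfRangeError instead of cycling forever
    filename_queue = tf.train.string_input_producer(
        dataset, num_epochs=1, shuffle=False)
    reader = tf.TextLineReader()
    _, value = reader.read(filename_queue)
    rows = 0
    with tf.Session() as sess:
        # num_epochs is tracked in a local variable, so local variables
        # must be initialized before the queue runners start
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            while not coord.should_stop():
                sess.run(value)
                rows += 1
        except tf.errors.OutOfRangeError:
            print('EOF reached after {} rows'.format(rows))
        finally:
            coord.request_stop()
        coord.join(threads)
    return rows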

standardize_data.list_files(path) is a function I wrote in another module; I am simply reusing it in this script. What it does is return a list of all the files found under PATH. Note that my PATH contains only CSV files.
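
(For reference, it does something along these lines; this is a sketch, not the exact implementation:)

import os

def list_files(path):
    """Hypothetical equivalent of standardize_data.list_files: returns
    the full paths of all files found under `path`."""
    found = []
    for root, _, filenames in os.walk(path):
        for filename in filenames:
            found.append(os.path.join(root, filename))
    return found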

