cheonbi 2019. 11. 11. 14:11

from keras import backend as K

from keras.utils import np_utils

from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape

from keras.layers.convolutional import Convolution3D, MaxPooling3D, AveragePooling3D

from keras.layers.convolutional import Convolution2D, MaxPooling2D, AveragePooling2D

from keras.layers import LSTM, TimeDistributed

from keras.layers import Input

from keras.models import Model

from keras.models import load_model

from keras.metrics import top_k_categorical_accuracy

from sklearn import preprocessing

from sklearn.manifold import TSNE

from sklearn.model_selection import train_test_split

from sklearn.metrics import confusion_matrix

from PIL import Image

import numpy as np

import pandas as pd

import matplotlib.pyplot as plt

import glob

import itertools

%matplotlib inline

 

# Hyperparameter Definition

ped1_label_by_frame = pd.read_table('./dataset/UCSDped1/label.m', header=None, skiprows=0).values  # assumed analogous Ped1 label file; each row lists one Test clip's abnormal frame ranges, e.g. "61:180"

ped2_label_by_frame = pd.read_table('./dataset/UCSDped2/label.m', header=None, skiprows=0).values

resized_as = (30, 40)

window_size = 10

label_threshold = int(window_size*0.5)
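
A quick sanity check of the windowing rule used below: a 10-frame window is labeled abnormal once at least label_threshold (here 5) of its frames fall inside one of the ranges listed in label.m. The row "61:180" is only a hypothetical example of the expected format, not an actual label entry.

# Hypothetical example of a label.m row; real rows come from the ped1/ped2 label files.
example_row = "61:180"
example_ranges = [(int(r.split(':')[0])-1, int(r.split(':')[1])-1) for r in example_row.split(',')]  # 0-indexed, inclusive
wi = 57  # window covering frames 58..67 (1-indexed)
n_abnormal = sum(1 for li in range(wi, wi+window_size) for lo, hi in example_ranges if lo <= li <= hi)
print(n_abnormal, int(label_threshold <= n_abnormal))  # 7 abnormal frames out of 10 -> window labeled 1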

 

 


 

 

# Preprocessing Ped1

ped1_dataset_feature, ped1_dataset_label = np.zeros((0, window_size, resized_as[0], resized_as[1])), np.zeros((0, ))

normal_video_list, abnormal_video_list = sorted(glob.glob('../../../dataset_tmp/ucsd/UCSDped1/Train/*')), sorted(glob.glob('../../../dataset_tmp/ucsd/UCSDped1/Test/*'))

for v in abnormal_video_list:

    video_index = int(v.split('/')[-1].split('Test')[-1])

    video_label = ped1_label_by_frame[video_index-1]

    frames = sorted(glob.glob(v+'/*.tif'))

    n_frames = len(frames)

    n_windows = n_frames-window_size+1

    window_buffer_feature = np.zeros((n_windows, window_size, resized_as[0], resized_as[1]), dtype='int')

    window_buffer_label = np.zeros((n_windows,), dtype='int')

    # Parse the clip's abnormal frame ranges once; each "a:b" entry is 1-indexed and inclusive
    abnormal_ranges = [(int(r.split(':')[0])-1, int(r.split(':')[1])-1) for r in video_label[0].split(',')]

    for wi in range(n_windows):

        # Label the window abnormal when at least label_threshold of its own frames fall in an abnormal range
        n_abnormal_frame = sum(1 for li in range(wi, wi+window_size) for lo, hi in abnormal_ranges if lo <= li <= hi)

        if(label_threshold<=n_abnormal_frame):

            window_buffer_label[wi] = 1

        for w in range(window_size):

            window_buffer_feature[wi][w] = np.asarray(Image.open(frames[wi+w]).resize((resized_as[1], resized_as[0]), Image.ANTIALIAS))

    ped1_dataset_feature, ped1_dataset_label = np.concatenate((ped1_dataset_feature, window_buffer_feature), axis=0), np.concatenate((ped1_dataset_label, window_buffer_label), axis=0)

    print("File name: %s, n_frames: %d, n_windows: %d, Abnormal frame range: %s, n_abnormal_window: %d" %(v.split('/')[-1], n_frames, n_windows, video_label[0], len(window_buffer_label[window_buffer_label==1])))

for v in normal_video_list:

    video_index = int(v.split('/')[-1].split('Train')[-1])

    frames = sorted(glob.glob(v+'/*.tif'))

    n_frames = len(frames)

    n_windows = n_frames-window_size+1

    window_buffer_feature = np.zeros((n_windows, window_size, resized_as[0], resized_as[1]), dtype='int')

    window_buffer_label = np.zeros((n_windows,), dtype='int')

    # Training clips contain only normal frames, so the window labels stay at their initial value of 0
    for wi in range(n_windows):

        for w in range(window_size):

            window_buffer_feature[wi][w] = np.asarray(Image.open(frames[wi+w]).resize((resized_as[1], resized_as[0]), Image.ANTIALIAS))

    ped1_dataset_feature, ped1_dataset_label = np.concatenate((ped1_dataset_feature, window_buffer_feature), axis=0), np.concatenate((ped1_dataset_label, window_buffer_label), axis=0)

    print("File name: %s, n_frame: %d, n_windows: %d, Abnormal frame range: None, n_abnormal_window: %d" %(v.split('/')[-1], n_frames, n_windows, len(window_buffer_label[window_buffer_label==1])))

np.save('../../../dataset_tmp/ucsd/ped1_np/ped1_X_win'+str(window_size), ped1_dataset_feature), np.save('../../../dataset_tmp/ucsd/ped1_np/ped1_Y_win'+str(window_size), ped1_dataset_label)

print("Done. Dataset generated: %s, %s. Class balance: %d: %d" %(ped1_dataset_feature.shape, ped1_dataset_label.shape, ped1_dataset_label[ped1_dataset_label==0].shape[0], ped1_dataset_label[ped1_dataset_label==1].shape[0]))

 

 

# Preprocessing Ped2

ped2_dataset_feature, ped2_dataset_label = np.zeros((0, window_size, resized_as[0], resized_as[1])), np.zeros((0, ))

normal_video_list, abnormal_video_list = sorted(glob.glob('../../../dataset_tmp/ucsd/UCSDped2/Train/*')), sorted(glob.glob('../../../dataset_tmp/ucsd/UCSDped2/Test/*'))

for v in abnormal_video_list:

    video_index = int(v.split('/')[-1].split('Test')[-1])

    video_label = ped2_label_by_frame[video_index-1]

    frames = sorted(glob.glob(v+'/*.tif'))

    n_frames = len(frames)

    n_windows = n_frames-window_size+1

    window_buffer_feature = np.zeros((n_windows, window_size, resized_as[0], resized_as[1]), dtype='int')

    window_buffer_label = np.zeros((n_windows,), dtype='int')

    # Parse the clip's abnormal frame ranges once; each "a:b" entry is 1-indexed and inclusive
    abnormal_ranges = [(int(r.split(':')[0])-1, int(r.split(':')[1])-1) for r in video_label[0].split(',')]

    for wi in range(n_windows):

        # Label the window abnormal when at least label_threshold of its own frames fall in an abnormal range
        n_abnormal_frame = sum(1 for li in range(wi, wi+window_size) for lo, hi in abnormal_ranges if lo <= li <= hi)

        if(label_threshold<=n_abnormal_frame):

            window_buffer_label[wi] = 1

        for w in range(window_size):

            window_buffer_feature[wi][w] = np.asarray(Image.open(frames[wi+w]).resize((resized_as[1], resized_as[0]), Image.ANTIALIAS))

    ped2_dataset_feature, ped2_dataset_label = np.concatenate((ped2_dataset_feature, window_buffer_feature), axis=0), np.concatenate((ped2_dataset_label, window_buffer_label), axis=0)

    print("File name: %s, n_frames: %d, n_windows: %d, Abnormal frame range: %s, n_abnormal_window: %d" %(v.split('/')[-1], n_frames, n_windows, video_label[0], len(window_buffer_label[window_buffer_label==1])))

for v in normal_video_list:

    video_index = int(v.split('/')[-1].split('Train')[-1])

    frames = sorted(glob.glob(v+'/*.tif'))

    n_frames = len(frames)

    n_windows = n_frames-window_size+1

    window_buffer_feature = np.zeros((n_windows, window_size, resized_as[0], resized_as[1]), dtype='int')

    window_buffer_label = np.zeros((n_windows,), dtype='int')

    # Training clips contain only normal frames, so the window labels stay at their initial value of 0
    for wi in range(n_windows):

        for w in range(window_size):

            window_buffer_feature[wi][w] = np.asarray(Image.open(frames[wi+w]).resize((resized_as[1], resized_as[0]), Image.ANTIALIAS))

    ped2_dataset_feature, ped2_dataset_label = np.concatenate((ped2_dataset_feature, window_buffer_feature), axis=0), np.concatenate((ped2_dataset_label, window_buffer_label), axis=0)

    print("File name: %s, n_frame: %d, n_windows: %d, Abnormal frame range: None, n_abnormal_window: %d" %(v.split('/')[-1], n_frames, n_windows, len(window_buffer_label[window_buffer_label==1])))

np.save('../../../dataset_tmp/ucsd/ped2_np/ped2_X_win'+str(window_size), ped2_dataset_feature), np.save('../../../dataset_tmp/ucsd/ped2_np/ped2_Y_win'+str(window_size), ped2_dataset_label)

print("Done. Dataset generated: %s, %s. Class balance: %d: %d" %(ped2_dataset_feature.shape, ped2_dataset_label.shape, ped2_dataset_label[ped2_dataset_label==0].shape[0], ped2_dataset_label[ped2_dataset_label==1].shape[0]))