def fprop(self, x):
    """
    Exposes all the layers of the model returned by get_layer_names.
    :param x: A symbolic representation of the network input
    :return: A dictionary mapping layer names to the symbolic
             representation of their output.
    """
    from keras.models import Model as KerasModel

    if self.keras_model is None:
        # Get the symbolic input(s) of the wrapped Keras model
        new_input = self.model.inputs
        # Make a new model that returns each of the layers as output
        out_layers = [x_layer.output for x_layer in self.model.layers]
        self.keras_model = KerasModel(new_input, out_layers)

    # and get the outputs for that model on the input x
    outputs = self.keras_model(x)

    # Keras returns a list only when there is more than one output;
    # if the model has a single layer, wrap its output in a list
    if len(self.model.layers) == 1:
        outputs = [outputs]

    # map each layer name to the symbolic output of that layer
    return dict(zip(self.get_layer_names(), outputs))
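For reference, a minimal usage sketch of the rewritten method (my own, not from the repo): it assumes a trained model has already been saved, and the model path and the (270, 1) input shape are only examples taken from the comments above.

import keras
import tensorflow.compat.v1 as tf
from cleverhans_copy.utils_keras import KerasModelWrapper

# Load a trained model (path is illustrative) and expose its per-layer outputs.
model = keras.models.load_model('best_model.hdf5')
wrap = KerasModelWrapper(model)
x = tf.placeholder(tf.float32, shape=(None, 270, 1))  # example univariate series shape
layer_outputs = wrap.fprop(x)  # dict: layer name -> symbolic layer output
last_layer = wrap.get_layer_names()[-1]
preds = layer_outputs[last_layer]  # symbolic output of the final layer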
src/cleverhans_tutorials/tsc_tutorial_keras_tf.py
Replace the contents of tsc_tutorial_keras_tf.py with the following:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os
import sklearn
import numpy as np
import keras
from keras import backend
import pandas as pd
from tensorflow.python.platform import flags
from sklearn.preprocessing import LabelEncoder
from cleverhans_copy.attacks import FastGradientMethod
from cleverhans_copy.attacks import BasicIterativeMethod
from cleverhans_copy.utils import AccuracyReport
from cleverhans_copy.utils_keras import KerasModelWrapper
from cleverhans_copy.utils_tf import model_eval

import tensorflow.compat.v1 as tf  # use the TF 1.x API
tf.disable_v2_behavior()  # disable TF 2.x behavior

FLAGS = flags.FLAGS
BATCH_SIZE = 1024


def readucr(filename):
    data = np.loadtxt(filename, delimiter=',')
    Y = data[:, 0]
    X = data[:, 1:]
    return X, Y


def read_dataset(root_dir, archive_name, dataset_name):
    datasets_dict = {}
    file_name = root_dir + '/archives/' + archive_name + '/' + dataset_name + '/' + dataset_name
    x_train, y_train = readucr(file_name + '_TRAIN')
    x_test, y_test = readucr(file_name + '_TEST')
    datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(),
                                   x_test.copy(), y_test.copy())
    return datasets_dict


def transform_labels(y_train, y_test):
    """
    Transform the labels so that the minimum is zero and they are continuous.
    For example, [1, 3, 4] ---> [0, 1, 2]
    """
    # init the encoder
    encoder = LabelEncoder()
    # concat train and test to fit
    y_train_test = np.concatenate((y_train, y_test), axis=0)
    # fit the encoder
    encoder.fit(y_train_test)
    # transform to min-zero, continuous labels
    new_y_train_test = encoder.transform(y_train_test)
    # resplit the train and test
    new_y_train = new_y_train_test[0:len(y_train)]
    new_y_test = new_y_train_test[len(y_train):]
    return new_y_train, new_y_test


def prepare_data(datasets_dict, dataset_name):
    x_train = datasets_dict[dataset_name][0]
    y_train = datasets_dict[dataset_name][1]
    x_test = datasets_dict[dataset_name][2]
    y_test = datasets_dict[dataset_name][3]

    nb_classes = len(np.unique(np.concatenate((y_train, y_test), axis=0)))

    # shift the labels so their minimum is zero
    y_train, y_test = transform_labels(y_train, y_test)

    # save the original y because later we will use the binary version
    y_true = y_test.astype(np.int64)

    # transform the labels from integers to one-hot vectors
    enc = sklearn.preprocessing.OneHotEncoder()
    enc.fit(np.concatenate((y_train, y_test), axis=0).reshape(-1, 1))
    y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
    y_test = enc.transform(y_test.reshape(-1, 1)).toarray()

    if len(x_train.shape) == 2:  # if univariate
        # add a dimension to make it multivariate with one dimension
        x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
        x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))

    return x_train, y_train, x_test, y_test, y_true, nb_classes


def create_directory(directory_path):
    if os.path.exists(directory_path):
        return None
    else:
        os.makedirs(directory_path)
        return directory_path


def add_labels_to_adv_test_set(dataset_dict, dataset_name, adv_data_dir, original_y):
    x_test_perturbed = np.loadtxt(adv_data_dir + dataset_name + '-adv', delimiter=',')
    test_set = np.zeros((original_y.shape[0], x_test_perturbed.shape[1] + 1), dtype=np.float64)
    test_set[:, 0] = original_y
    test_set[:, 1:] = x_test_perturbed
    np.savetxt(adv_data_dir + dataset_name + '-adv', test_set, delimiter=',')


def tsc_tutorial(attack_method='fgsm', batch_size=BATCH_SIZE,
                 dataset_name='Adiac', eps=0.1, attack_on='train'):
    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    if not hasattr(backend, "tf"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the TensorFlow backend.")

    # Create TF session and set it as the Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)

    classifier_name = 'resnet'
    root_dir = 'D:/Project/ijcnn19attacks/'
    archive_name = 'TSC'
    out_dir = 'ucr-attack/'
    file_path = root_dir + 'results/' + classifier_name + '/' + archive_name + \
                '/' + dataset_name + '/best_model.hdf5'
    adv_data_dir = out_dir + attack_method + '/' + archive_name + '/' + attack_on + \
                   '/eps-' + str(eps) + '/'

    if os.path.exists(adv_data_dir + dataset_name + '-adv'):
        print('Already done:', dataset_name)
        return
    else:
        print('Doing:', dataset_name)

    dataset_dict = read_dataset(root_dir, archive_name, dataset_name)
    x_train, y_train, x_test, y_test, _, nb_classes = prepare_data(dataset_dict, dataset_name)

    if attack_on == 'train':
        X = x_train
        Y = y_train
        original_y = dataset_dict[dataset_name][1]
    elif attack_on == 'test':
        X = x_test
        Y = y_test
        original_y = dataset_dict[dataset_name][3]
    else:
        print('Error: attack_on must be either "train" or "test"')
        exit()

    # for big datasets we should decompose the evaluation of the attack into batches
    ori_acc = 0
    adv_acc = 0

    res_dir = out_dir + 'results' + attack_method + '.csv'
    if os.path.exists(res_dir):
        res_ori = pd.read_csv(res_dir, index_col=False)
    else:
        res_ori = pd.DataFrame(data=np.zeros((0, 3), dtype=float), index=[],
                               columns=['dataset_name', 'ori_acc', 'adv_acc'])

    test_set = np.zeros((Y.shape[0], x_train.shape[1] + 1), dtype=np.float64)

    # loop through the batches
    for i in range(0, len(X), batch_size):
        curr_X = X[i:i + batch_size]
        curr_Y = Y[i:i + batch_size]

        # Obtain series parameters
        img_rows, nchannels = x_train.shape[1:3]

        # Define input TF placeholders
        x = tf.placeholder(tf.float32, shape=(None, img_rows, nchannels))
        y = tf.placeholder(tf.float32, shape=(None, nb_classes))

        # Define the TF model graph
        model = keras.models.load_model(file_path)
        preds = model(x)
        print("Defined TensorFlow model graph.")

        def evaluate():
            # Evaluate the accuracy of the model on legitimate test examples
            eval_params = {'batch_size': batch_size}
            acc = model_eval(sess, x, y, preds, curr_X, curr_Y, args=eval_params)
            report.clean_train_clean_eval = acc
            print('Test accuracy on legitimate examples: %0.4f' % acc)
            return acc

        wrap = KerasModelWrapper(model)
        ori_acc += evaluate() * len(curr_X) / len(X)

        if attack_method == 'fgsm':
            # Initialize the Fast Gradient Sign Method (FGSM) attack object and graph
            fgsm = FastGradientMethod(wrap, sess=sess)
            fgsm_params = {'eps': eps}
            adv_x = fgsm.generate(x, **fgsm_params)
        elif attack_method == 'bim':
            # Basic Iterative Method
            bim = BasicIterativeMethod(wrap, sess=sess)
            bim_params = {'eps': eps, 'eps_iter': 0.05, 'nb_iter': 10}
            adv_x = bim.generate(x, **bim_params)
        else:
            print('Either bim or fgsm are acceptable as attack methods')
            return

        # Consider the attack to be constant
        adv_x = tf.stop_gradient(adv_x)
        adv = adv_x.eval({x: curr_X}, session=sess)
        adv = adv.reshape(adv.shape[0], adv.shape[1])

        preds_adv = model(adv_x)

        # Evaluate the accuracy of the model on adversarial examples
        eval_par = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds_adv, curr_X, curr_Y, args=eval_par)
        print('Test accuracy on adversarial examples: %0.4f\n' % acc)
        report.clean_train_adv_eval = acc
        adv_acc += acc * len(curr_X) / len(X)

        test_set[i:i + batch_size, 0] = original_y[i:i + batch_size]
        test_set[i:i + batch_size, 1:] = adv

    create_directory(adv_data_dir)
    np.savetxt(adv_data_dir + dataset_name + '-adv', test_set, delimiter=',')
    add_labels_to_adv_test_set(dataset_dict, dataset_name, adv_data_dir, original_y)

    res = pd.DataFrame(data=np.zeros((1, 3), dtype=float), index=[0],
                       columns=['dataset_name', 'ori_acc', 'adv_acc'])
    res['dataset_name'] = dataset_name + str(eps)
    res['ori_acc'] = ori_acc
    res['adv_acc'] = adv_acc
    res_ori = pd.concat((res_ori, res), sort=False)
    res_ori.to_csv(res_dir, index=False)

    return report


def main(argv=None, attack_method='fgsm'):
    flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
    # full datasets
    dataset_names = ['50words', 'Adiac', 'ArrowHead', 'Beef', 'BeetleFly', 'BirdChicken', 'Car', 'CBF',
'ChlorineConcentration', 'CinC_ECG_torso', 'Coffee',
'Computers', 'Cricket_X', 'Cricket_Y', 'Cricket_Z', 'DiatomSizeReduction',
'DistalPhalanxOutlineAgeGroup', 'DistalPhalanxOutlineCorrect', 'DistalPhalanxTW',
'Earthquakes', 'ECG200', 'ECG5000', 'ECGFiveDays', 'ElectricDevices', 'FaceAll', 'FaceFour',
'FacesUCR', 'FISH', 'FordA', 'FordB', 'Gun_Point', 'Ham', 'HandOutlines',
'Haptics', 'Herring', 'InlineSkate', 'InsectWingbeatSound', 'ItalyPowerDemand',
'LargeKitchenAppliances', 'Lighting2', 'Lighting7', 'MALLAT', 'Meat', 'MedicalImages',
'MiddlePhalanxOutlineAgeGroup', 'MiddlePhalanxOutlineCorrect', 'MiddlePhalanxTW', 'MoteStrain',
'NonInvasiveFatalECG_Thorax1', 'NonInvasiveFatalECG_Thorax2', 'OliveOil',
'OSULeaf', 'PhalangesOutlinesCorrect', 'Phoneme', 'Plane', 'ProximalPhalanxOutlineAgeGroup',
'ProximalPhalanxOutlineCorrect', 'ProximalPhalanxTW', 'RefrigerationDevices',
'ScreenType', 'ShapeletSim', 'ShapesAll', 'SmallKitchenAppliances', 'SonyAIBORobotSurface',
'SonyAIBORobotSurfaceII', 'StarLightCurves', 'Strawberry', 'SwedishLeaf', 'Symbols',
'synthetic_control', 'ToeSegmentation1', 'ToeSegmentation2', 'Trace', 'TwoLeadECG', 'Two_Patterns',
'UWaveGestureLibraryAll', 'uWaveGestureLibrary_X', 'uWaveGestureLibrary_Y',
'uWaveGestureLibrary_Z', 'wafer', 'Wine', 'WordsSynonyms', 'Worms', 'WormsTwoClass', 'yoga']
    # dataset_names = ['Coffee']
    # epsilons = np.arange(start=0.0, stop=2.0, step=0.025, dtype=np.float32)
    epsilons = [0.1]

    for dataset_name in dataset_names:
        for ep in epsilons:
            tsc_tutorial(attack_method=attack_method,
                         batch_size=FLAGS.batch_size,
                         dataset_name=dataset_name,
                         eps=ep)
# if __name__ == '__main__':
#     flags.DEFINE_integer('nb_epochs', NB_EPOCHS, 'Number of epochs to train model')
#     flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
#     flags.DEFINE_float('learning_rate', LEARNING_RATE, 'Learning rate for training')
#     flags.DEFINE_string('train_dir', TRAIN_DIR, 'Directory where to save model.')
#     flags.DEFINE_string('filename', FILENAME, 'Checkpoint filename.')
#     flags.DEFINE_boolean('load_model', LOAD_MODEL, 'Load saved model or train.')
#     tf.app.run()
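Since the original __main__ block above is commented out, here is a minimal driver sketch of my own (not part of the original file) for running a single attack directly; 'Coffee' and eps=0.1 are example values only.

# Minimal driver sketch: invoke the tutorial for one dataset instead of tf.app.run().
if __name__ == '__main__':
    report = tsc_tutorial(attack_method='fgsm',
                          batch_size=BATCH_SIZE,
                          dataset_name='Coffee',
                          eps=0.1,
                          attack_on='test')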
@GeYuYao-hub Thanks for your comments. One more minor note on your instructions:
numpy 1.24.2 conflicts with the other pinned requirements:
" The user requested numpy==1.24.2
pandas 1.3.5 depends on numpy>=1.17.3; platform_machine != "aarch64" and platform_machine != "arm64" and python_version < "3.10"
scikit-learn 1.0.2 depends on numpy>=1.14.6
scipy 1.7.3 depends on numpy<1.23.0 and >=1.16.5
"
Therefore pip-requirement.txt could look like this (numpy is left unpinned so pip can resolve a version in the [1.17.3, 1.23.0) range that satisfies pandas, scikit-learn, and scipy):
keras==2.10.0
numpy
pandas==1.3.5
scikit-learn==1.0.2
scipy==1.7.3
matplotlib==3.6.2
tensorflow-gpu==2.10.1
h5py==3.7.0
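As an optional sanity check (my own sketch, not part of the repo), you can print the versions pip actually resolved; this assumes Python 3.8+ for importlib.metadata:

# Optional sanity check: print the versions pip resolved (requires Python 3.8+).
from importlib.metadata import version

for pkg in ['keras', 'numpy', 'pandas', 'scikit-learn',
            'scipy', 'matplotlib', 'tensorflow-gpu', 'h5py']:
    print(pkg, version(pkg))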
My solution for ijcnn19attacks
If this solved your problem, please reply to this issue to let me know.
Clone from GitHub
Create a conda environment
Prerequisites
Code
src/main.py
Modify the header code of main.py
src/cleverhans_copy/utils_tf.py
Add the following code
src/cleverhans_copy/compat.py
Add the following code
src/cleverhans_copy/attacks.py
Delete the original content on line 215 and replace it with the following
src/cleverhans_copy/utils_keras.py
Add the following code
Modify def fprop to the following (see the rewritten fprop method at the top of this issue)
src/cleverhans_tutorials/tsc_tutorial_keras_tf.py
Replace the contents of tsc_tutorial_keras_tf.py with the file given above.