Posts

CNN

Load image files and build a convolutional deep learning model.

# Load the list of image files (path placeholder kept from the original)
images = os.listdir(~~~path)

# Load and display an image
img = Image.open("PATH AND FILE").resize((?, ?))
plt.title('NAME')
plt.imshow(img)
plt.show()

# Convert the image to an array and scale it to [0, 1]
img = Image.open("PATH AND FILE").resize((?, ?))
img = np.array(img) / 255.

# After preprocessing, convert the list back to an array (handle as x, y)
list_arr = np.array(img_list)

# Or preprocess for a pretrained model
img2 = image.img_to_array(img_origin)
img2 = img2.reshape((-1, ?, ?, 3))
img2 = preprocess_input(img2)
features = model.predict(img2)
# print(decode_predictions(features, top=3))

# Convolutional model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D

model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',
                 activation='relu', input_shape=(?, ?, 3)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
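A minimal runnable sketch completing the model above, assuming hypothetical 64x64 RGB inputs and 10 classes (both are ? placeholders in the original, and the dense head, optimizer, and dummy data here are assumptions too):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D

model = Sequential([
    Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',
           activation='relu', input_shape=(64, 64, 3)),  # assumed 64x64 RGB input
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Dropout(0.2),
    Flatten(),
    Dense(128, activation='relu'),       # assumed dense head
    Dense(10, activation='softmax'),     # assumed 10 classes
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Dummy data just to show the call signature
x = np.random.rand(8, 64, 64, 3).astype('float32')
y = np.random.randint(0, 10, size=(8,))
model.fit(x, y, epochs=1, batch_size=4)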

LSTM model using text data

Deep learning example code with text data.

# Search for characters other than lowercase letters and spaces
df['column'][df['column'].str.contains('[^a-z ]')].sum()

# Remove those characters
df['column'] = df['column'].str.replace('[^a-z ]', '', regex=True)

# Strip leading and trailing whitespace from each sentence
df['column'] = df['column'].str.strip()

# Count duplicate rows
df['column'].duplicated().sum()

# Drop duplicate rows
df.drop_duplicates(subset=['column'], inplace=True)

# Graph using value_counts()
df['column'].value_counts().plot(kind='bar')

# Split X and Y
features = df['column'].values
labels = df['column'].values

# TF-IDF
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
x_train_v = tfidf.fit_transform(x_train)
x_test_v = tfidf.transform(x_test)

# Tokenizing
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
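The excerpt cuts off at the tokenizing imports, before the LSTM itself. A sketch of how the rest might go; the toy corpus, vocabulary cap, sequence length, and binary-classification layout are all assumptions:

import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

texts = ['good movie', 'bad movie', 'great film']   # hypothetical corpus
labels = np.array([1, 0, 1])                        # hypothetical binary labels

tokenizer = Tokenizer(num_words=1000)   # assumed vocabulary cap
tokenizer.fit_on_texts(texts)
seqs = tokenizer.texts_to_sequences(texts)
x = pad_sequences(seqs, maxlen=20)      # assumed max sequence length

model = Sequential([
    Embedding(input_dim=1000, output_dim=32),
    LSTM(32),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x, labels, epochs=1)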

Voting Ensemble Model

Using a voting ensemble model, we can train several models individually and compare their results.

from sklearn.linear_model import LinearRegression as lr
from xgboost import XGBRegressor as xgb
from sklearn.ensemble import RandomForestRegressor as rfr
from sklearn.ensemble import GradientBoostingRegressor as grb
from sklearn.ensemble import VotingRegressor
import joblib
import time

model_list = [lr(), rfr(), grb(), xgb()]

# Convert the multi-dimensional array to a one-dimensional array
train_y = train_y.to_numpy().flatten()

# Train each model in sequence and keep the fitted models
model_result = []
for i in range(len(model_list)):
    model = model_list[i]
    model.fit(train_x, train_y)
    pred_y = model.predict(test_x)
    model_result.append(model)

# Named estimator list
voting_models = [
    ('linear_reg', model_result[0]),
    ('randForest', model_result[1]),
    ('gradBoost', model_result[2]),
    ('xgboost', model_result[3]),
]

# Run VotingRegressor
voting_model = VotingRegressor(estimators=voting_models)
voting_model.fit(train_x, train_y)
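A standalone sketch of the final step, timing the ensemble fit and saving it with the time and joblib imports from above; the synthetic data, the two-estimator list, and the file name are assumptions:

import time
import joblib
import numpy as np
from sklearn.ensemble import VotingRegressor, RandomForestRegressor as rfr
from sklearn.linear_model import LinearRegression as lr

# Hypothetical data so the sketch runs on its own
train_x = np.random.rand(100, 4)
train_y = np.random.rand(100)

voting_models = [('linear_reg', lr()), ('randForest', rfr(n_estimators=10))]

start = time.time()
voting_model = VotingRegressor(estimators=voting_models)
voting_model.fit(train_x, train_y)
print('fit took %.2fs' % (time.time() - start))

pred_y = voting_model.predict(train_x)
joblib.dump(voting_model, 'voting_model.pkl')  # file name is an assumption

Note that VotingRegressor clones and refits each estimator inside fit(), so the earlier per-model training loop is useful for comparing the models individually, but those fitted models are not reused by the ensemble.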

Graph

Several Python modules offer a variety of plotting functions. Among them, I drew and compared graphs using the matplotlib library, which makes drawing graphs efficient, and pandas' own plot function.

# Default bar graph
df['column'].value_counts().plot(kind='bar')

# Side-by-side subplots: pandas plot vs. matplotlib
plt.subplot(1, 2, 1)
df['column1'].plot(kind='hist')
plt.subplot(1, 2, 2)
plt.hist(df['column2'])
plt.show()

# Seaborn histogram
sns.histplot(data=df, x='column')

# Seaborn histogram colored by a hue column
sns.histplot(data=df, x='column', hue='column color')

# Seaborn KDE plot (a smoothed, round-ended histogram)
sns.kdeplot(data=df, x='column', hue='column color')
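A self-contained sketch of the comparison described above; the toy DataFrame and its column names are assumptions so the snippet runs on its own:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame({
    'column1': np.random.randn(100),
    'column2': np.random.randn(100),
})

# Left: pandas' own plot; right: the same idea through matplotlib directly
plt.subplot(1, 2, 1)
df['column1'].plot(kind='hist')
plt.subplot(1, 2, 2)
plt.hist(df['column2'])
plt.show()

# The seaborn equivalent
sns.histplot(data=df, x='column1')
plt.show()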

Test

Testing.

# A simple test function for the blog
def FirstFunc(text):
    text += "!"
    print("Hello Blogger!")