How can I perform superpixel image segmentation and feature extraction?

I am interested in multi-class segmentation of skin tissue. I have 3000 labeled skin-tissue superpixels split into 4 classes, and I built a CNN classification algorithm to train my classification model. I now want to use this classification model for a segmentation task on new skin-tissue images, and to extract features for the tissue belonging to each class.

Below is the code used to train my classification model:

from tensorflow.keras.layers import Input,Concatenate,Dropout,Flatten,Dense,GlobalAveragePooling2D,Conv2D
from tensorflow.keras import backend as K
#from tensorflow.keras.utils import np_utils
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import optimizers
from tensorflow.keras.metrics import top_k_categorical_accuracy
from tensorflow.keras.models import Sequential,Model,load_model
import tensorflow as tf
from tensorflow.keras.initializers import he_uniform
from tensorflow.keras.callbacks import ModelCheckpoint,LearningRateScheduler,TensorBoard,EarlyStopping,CSVLogger,ReduceLROnPlateau
#from tensorflow.compat.keras.backend import KTF
#import keras.backend.tensorflow_backend as KTF
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.inception_v3 import InceptionV3

import os
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
#import numpy as np,Pillow,skimage,imageio,matplotlib
#from scipy.misc import imresize
from skimage.transform import resize
from tqdm import tqdm
from tensorflow.keras import metrics

#### PREPROCESS STAGE ####

# Path to superpixels class files
classes_file = "/home/DEV/SKIN_3000_CLASSES.csv"
concatenated_data = pd.read_csv(classes_file,header=None)

# Instances with targets
targets = concatenated_data[1].tolist()

# Split data according to their classes
class_0 = concatenated_data[concatenated_data[1] == 0]
class_1 = concatenated_data[concatenated_data[1] == 1]
class_2 = concatenated_data[concatenated_data[1] == 2]
class_3 = concatenated_data[concatenated_data[1] == 3]

# Holdout split Train/test set (Other options are k-folds or leave-one-out)
split_proportion = 0.8

split_size_0 = int(len(class_0)*split_proportion)
split_size_1 = int(len(class_1)*split_proportion)
split_size_2 = int(len(class_2)*split_proportion)
split_size_3 = int(len(class_3)*split_proportion)

new_class_0_Train = np.random.choice(len(class_0),split_size_0,replace=False)
new_class_0_Train = class_0.iloc[new_class_0_Train]
new_class_0_test = ~class_0.iloc[:][0].isin(new_class_0_Train.iloc[:][0])
new_class_0_test = class_0[new_class_0_test]

new_class_1_Train = np.random.choice(len(class_1),split_size_1,replace=False)
new_class_1_Train = class_1.iloc[new_class_1_Train]
new_class_1_test = ~class_1.iloc[:][0].isin(new_class_1_Train.iloc[:][0])
new_class_1_test = class_1[new_class_1_test]

new_class_2_Train = np.random.choice(len(class_2),split_size_2,replace=False)
new_class_2_Train = class_2.iloc[new_class_2_Train]
new_class_2_test = ~class_2.iloc[:][0].isin(new_class_2_Train.iloc[:][0])
new_class_2_test = class_2[new_class_2_test]

new_class_3_Train = np.random.choice(len(class_3),split_size_3,replace=False)
new_class_3_Train = class_3.iloc[new_class_3_Train]
new_class_3_test = ~class_3.iloc[:][0].isin(new_class_3_Train.iloc[:][0])
new_class_3_test = class_3[new_class_3_test]

x_Train_List = pd.concat(
    [new_class_0_Train,new_class_1_Train,new_class_2_Train,new_class_3_Train])
x_test_List = pd.concat(
    [new_class_0_test,new_class_1_test,new_class_2_test,new_class_3_test])

# Load superpixels files
imagePath = "/home/DEV/SKIN_SET_3000/"

x_Train = []
y_Train = []
for index,row in tqdm(x_Train_List.iterrows(),total=x_Train_List.shape[0]):
    try:
        loadedImage = plt.imread(imagePath + str(row[0]) + ".jpg")
        x_Train.append(loadedImage)
        y_Train.append(row[1])
    except:
        # Try with .png file format if images are not properly loaded
        try:
            loadedImage = plt.imread(imagePath + str(row[0]) + ".png")
            x_Train.append(loadedImage)
            y_Train.append(row[1])
        except:
            # Print file names whenever it is impossible to load image files
            print(imagePath + str(row[0]))

x_test = []
y_test = []
for index,row in tqdm(x_test_List.iterrows(),total=x_test_List.shape[0]):
    try:
        loadedImage = plt.imread(imagePath + str(row[0]) + ".jpg")
        x_test.append(loadedImage)
        y_test.append(row[1])
    except:
        # Try with .png file format if images are not properly loaded
        try:
            loadedImage = plt.imread(imagePath + str(row[0]) + ".png")
            x_test.append(loadedImage)
            y_test.append(row[1])
        except:
            # Print file names whenever it is impossible to load image files
            print(imagePath + str(row[0]))


# Rescaling of images
img_width,img_height = 139,139

index = 0
for image in tqdm(x_Train):
    #aux = resize(image,(img_width,img_height,3),"bilinear")
    aux = resize(image,(img_width,img_height))
    x_Train[index] = aux / 255.0  # normalization
    index += 1

index = 0
for image in tqdm(x_test):
    aux = resize(image,(img_width,img_height))
    x_test[index] = aux / 255.0  # normalization
    index += 1




#### TRAINING STAGE ####

os.environ["KERAS_BACKEND"] = "tensorflow"
RANDOM_STATE = 42

def get_session(gpu_fraction=0.8):

    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)

    if num_threads:
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options,intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))


#KTF.set_session(get_session())


def precision(y_true,y_pred):

    true_positives = K.sum(K.round(K.clip(y_true * y_pred,0,1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred,0,1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision


def recall(y_true,y_pred):

    true_positives = K.sum(K.round(K.clip(y_true * y_pred,0,1)))
    possible_positives = K.sum(K.round(K.clip(y_true,0,1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall


def fbeta_score(y_true,y_pred,beta=1):

    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # Set F-score as 0 if there are no true positives (sklearn-like).
    if K.sum(K.round(K.clip(y_true,0,1))) == 0:
        return 0.0

    p = precision(y_true,y_pred)
    r = recall(y_true,y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score


nb_classes = 4
final_model = []


# Option = InceptionV3
model = InceptionV3(weights="imagenet",include_top=False,input_shape=(img_width,img_height,3))
# Option = resnet
# model = ResNet50(weights="imagenet",include_top=False,input_shape=(img_width,img_height,3))

# Creating new outputs for the model
x = model.output
x = Flatten()(x)
x = Dense(512,activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(512,activation="relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(nb_classes,activation='softmax')(x)
#predictions = Dense(nb_classes,activation='sigmoid')(x)
final_model = Model(inputs=model.input,outputs=predictions)


# Metrics
learningRate = 0.001
optimizer = optimizers.SGD(learning_rate=learningRate,momentum=0.88,nesterov=True)

# Compiling the model...
final_model.compile(loss="categorical_crossentropy",optimizer=optimizer,metrics=["accuracy",fbeta_score])
final_model.summary()

#final_model.compile(loss = 'categorical_crossentropy',optimizer = 'adam',metrics = ['accuracy'])

#model.compile(loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])

#x_Train = np.array(x_Train)
#x_test = np.array(x_test)

x_Train = np.asarray(x_Train).astype(np.float32)
#x_test = np.array(x_test)
x_test = np.asarray(x_test).astype(np.float32)

# Defining targets...
y_Train = np.concatenate([np.full((new_class_0_Train.shape[0]),0),np.full((new_class_1_Train.shape[0]),1),np.full((new_class_2_Train.shape[0]),2),np.full((new_class_3_Train.shape[0]),3)])

y_test = np.concatenate([np.full((new_class_0_test.shape[0]),0),np.full((new_class_1_test.shape[0]),1),np.full((new_class_2_test.shape[0]),2),np.full((new_class_3_test.shape[0]),3)])

y_Train = to_categorical(y_Train)
y_test =  to_categorical(y_test)

modelfilename = "/home/DEV/SKIN_SET_3000/model_inception.h5"

Trainingfilename = "/home/DEV/SKIN_SET_3000/Training.csv"
nb_Train_samples = y_Train.shape[0]
nb_test_samples = y_test.shape[0]
#epochs = 10000
epochs = 100
batch_size = 24
TrainingPatience = 200
decayPatience = TrainingPatience / 4

# Setting the data generator...
Train_datagen = ImageDataGenerator(
    horizontal_flip=True,fill_mode="reflect",zoom_range=0.2
)

Train_generator = Train_datagen.flow(x_Train,y_Train,batch_size=batch_size)

# Saving the model
checkpoint = ModelCheckpoint(modelfilename,monitor='val_accuracy',verbose=1,save_best_only=True,save_weights_only=False,mode='auto',save_freq=1)

adaptativeLearningRate = ReduceLROnPlateau(monitor='val_accuracy',factor=0.5,patience=decayPatience,min_delta=0.0001,cooldown=0,min_lr=1e-8)

early = EarlyStopping(monitor='val_accuracy',min_delta=0,patience=TrainingPatience,mode='auto')

csv_logger = CSVLogger(Trainingfilename,separator=",",append=False)

# Callbacks
callbacks = [checkpoint,early,csv_logger,adaptativeLearningRate]

# Training of the model
final_model.fit(Train_generator,steps_per_epoch=nb_Train_samples // batch_size,epochs=epochs,shuffle=True,validation_data=(x_test,y_test),validation_steps=nb_test_samples // batch_size,callbacks=callbacks)

final_model.save('/home/DEV/SKIN_SET_3000/model_inception.h5')
#compile metrics

To segment my image, I first convert the input image into superpixels using SLIC:

from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage import io; io.use_plugin('matplotlib')
import cv2
from skimage.color import label2rgb

img_width,img_height = 139,139

# load the model we saved
model = load_model('/home/DEV/SKIN_SET_3000/model_inception.h5',compile=False)

# Get test image ready
img = img_as_float(io.imread('/home/DEV/SKIN_ULCER.jpg')).astype(np.float32)
plt.imshow(img)
test_image_slic = slic(img,n_segments=500,compactness=10.0)
test_image_slic_out = mark_boundaries(img,test_image_slic)
plt.imshow(test_image_slic_out)
#test_image=test_image/255
test_image_array = np.array(test_image_slic_out)
test_image_resize = cv2.resize(test_image_array,(img_width,img_height))
test_image_reshape = test_image_resize.reshape(1,img_width,img_height,3)

I want to check whether each superpixel of my input is labeled as one of the four tissue classes, extract the regions belonging to each class as masks, and quantify the total surface area of each mask. Any suggestions on how to implement this approach would be greatly appreciated.

Solution

No verified solution to this question has been found yet.
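
One possible direction is sketched below. It is only a minimal sketch under my own assumptions, not the original poster's code: it reuses the objects defined in the question (model, img, test_image_slic, img_width, img_height), crops each SLIC superpixel at its bounding box, resizes the crop to the network input size, classifies it with the trained model, and writes the predicted class into a label map; per-class binary masks and their surface areas (as pixel counts) then follow directly from that map. The patch preprocessing must match whatever normalization was used at training time.

# Minimal sketch (assumption): per-superpixel classification, then per-class
# masks and mask areas. Reuses model, img, test_image_slic, img_width and
# img_height from the code above.
import numpy as np
from skimage.transform import resize

nb_classes = 4
class_map = np.zeros(test_image_slic.shape,dtype=np.int32)  # predicted class of every pixel

for label in np.unique(test_image_slic):
    region_mask = (test_image_slic == label)                # pixels of this superpixel
    rows,cols = np.where(region_mask)
    # Bounding-box crop of the superpixel, resized to the network input size
    patch = img[rows.min():rows.max() + 1,cols.min():cols.max() + 1]
    patch = resize(patch,(img_width,img_height,3)) / 255.0  # same normalization as the training loop
    pred = model.predict(patch[np.newaxis,...],verbose=0)
    class_map[region_mask] = int(np.argmax(pred))

# One binary mask per class and its total surface area as a pixel count
# (multiply by the physical pixel size, if known, to get a real area).
for c in range(nb_classes):
    mask = (class_map == c)
    print("class",c,"area:",int(mask.sum()),"pixels")
    plt.figure()
    plt.imshow(mask,cmap="gray")
    plt.title("class %d mask" % c)

If more per-region descriptors are needed, skimage.measure.regionprops can also be run on the resulting label map to obtain areas, centroids and other shape features for each connected region.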

