LiTS Challenge: Liver Segmentation

config

import logging

# Logging level
log_level = logging.INFO
logfile = 'output.txt'

# Number of CPUs used for parallel processing
N_PROC = 14

# Maximum number of iterations before optimisation is stopped
MAX_N_IT = -1

# Image/Seg shape
slice_shape = (388, 388)

# Initial parameters
params_initial_liver = [
    3,     # pos_x_std
    0.75,  # pos_y_std
    3,     # pos_z_std
    60,    # bilateral_x_std
    15,    # bilateral_y_std
    15,    # bilateral_z_std
    20.0,  # bilateral_intensity_std
    0.75,  # pos_w
    1.0    # bilateral_w  (we fix this one during optimization)
]

params_initial_lesion = [
    3.0,   # pos_x_std
    3.0,   # pos_y_std
    3.0,   # pos_z_std
    60.0,  # bilateral_x_std
    60.0,  # bilateral_y_std
    60.0,  # bilateral_z_std
    20.0,  # bilateral_intensity_std
    3.0,   # pos_w
    10.0   # bilateral_w  (we fix this one during optimization)
]

### CHOOSE LIVER OR LESION
params_initial = params_initial_liver
target_label = 1

# Fixed CRF parameters
max_iterations = 20
dynamic_z = False
ignore_memory = True
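# Note (added orientation; the mapping below is not spelled out in this file,
# it follows the standard fully-connected CRF formulation that dense-CRF
# processors implement): pos_* parametrize the smoothness kernel over voxel
# positions only, bilateral_* parametrize the appearance kernel over positions
# plus intensity, and pos_w / bilateral_w weight the two kernels. Roughly:
#   pairwise(i, j) = pos_w * exp(-|p_i - p_j|^2 / (2 * pos_std^2))
#                  + bilateral_w * exp(-|p_i - p_j|^2 / (2 * bilateral_std^2)
#                                      - (I_i - I_j)^2 / (2 * bilateral_intensity_std^2))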
###########################
##### 3DIRCA DATASET ######
###########################
test_set=[
(82, '/home/guest/training/volume-82.npy', '/home/guest/training/segmentation-82.npy') ,
(74, '/home/guest/training/volume-74.npy', '/home/guest/training/segmentation-74.npy') ,
(125, '/home/guest/training/volume-125.npy', '/home/guest/training/segmentation-125.npy') ,
(11, '/home/guest/training/volume-11.npy', '/home/guest/training/segmentation-11.npy') ,
(89, '/home/guest/training/volume-89.npy', '/home/guest/training/segmentation-89.npy') ,
(78, '/home/guest/training/volume-78.npy', '/home/guest/training/segmentation-78.npy') ,
(64, '/home/guest/training/volume-64.npy', '/home/guest/training/segmentation-64.npy') ,
(126, '/home/guest/training/volume-126.npy', '/home/guest/training/segmentation-126.npy') ,
(129, '/home/guest/training/volume-129.npy', '/home/guest/training/segmentation-129.npy') ,
(114, '/home/guest/training/volume-114.npy', '/home/guest/training/segmentation-114.npy') ,
(37, '/home/guest/training/volume-37.npy', '/home/guest/training/segmentation-37.npy') ,
(25, '/home/guest/training/volume-25.npy', '/home/guest/training/segmentation-25.npy') ,
(85, '/home/guest/training/volume-85.npy', '/home/guest/training/segmentation-85.npy') ,
(80, '/home/guest/training/volume-80.npy', '/home/guest/training/segmentation-80.npy') ,
(27, '/home/guest/training/volume-27.npy', '/home/guest/training/segmentation-27.npy') ,
(18, '/home/guest/training/volume-18.npy', '/home/guest/training/segmentation-18.npy') ,
(69, '/home/guest/training/volume-69.npy', '/home/guest/training/segmentation-69.npy') ,
(40, '/home/guest/training/volume-40.npy', '/home/guest/training/segmentation-40.npy') ,
(61, '/home/guest/training/volume-61.npy', '/home/guest/training/segmentation-61.npy') ,
(117, '/home/guest/training/volume-117.npy', '/home/guest/training/segmentation-117.npy') ,
(44, '/home/guest/training/volume-44.npy', '/home/guest/training/segmentation-44.npy') ,
(26, '/home/guest/training/volume-26.npy', '/home/guest/training/segmentation-26.npy') ,
(91, '/home/guest/training/volume-91.npy', '/home/guest/training/segmentation-91.npy') ,
(65, '/home/guest/training/volume-65.npy', '/home/guest/training/segmentation-65.npy') ,
(55, '/home/guest/training/volume-55.npy', '/home/guest/training/segmentation-55.npy') ,
(5, '/home/guest/training/volume-5.npy', '/home/guest/training/segmentation-5.npy') ,
(77, '/home/guest/training/volume-77.npy', '/home/guest/training/segmentation-77.npy') ,
(12, '/home/guest/training/volume-12.npy', '/home/guest/training/segmentation-12.npy') ,
(28, '/home/guest/training/volume-28.npy', '/home/guest/training/segmentation-28.npy') ,
(6, '/home/guest/training/volume-6.npy', '/home/guest/training/segmentation-6.npy') ,
(79, '/home/guest/training/volume-79.npy', '/home/guest/training/segmentation-79.npy') ,
(84, '/home/guest/training/volume-84.npy', '/home/guest/training/segmentation-84.npy') ,
(103, '/home/guest/training/volume-103.npy', '/home/guest/training/segmentation-103.npy') ,
(101, '/home/guest/training/volume-101.npy', '/home/guest/training/segmentation-101.npy') ,
(106, '/home/guest/training/volume-106.npy', '/home/guest/training/segmentation-106.npy') ,
(59, '/home/guest/training/volume-59.npy', '/home/guest/training/segmentation-59.npy') ,
(45, '/home/guest/training/volume-45.npy', '/home/guest/training/segmentation-45.npy') ,
(53, '/home/guest/training/volume-53.npy', '/home/guest/training/segmentation-53.npy') ,
(41, '/home/guest/training/volume-41.npy', '/home/guest/training/segmentation-41.npy') ,
(121, '/home/guest/training/volume-121.npy', '/home/guest/training/segmentation-121.npy')]

# Select dataset
#dataset = [irca_train_fold1, irca_test_fold1,\
#		irca_train_fold2, irca_test_fold2,\
#		irca_train_fold3, irca_test_fold3,\
#		irca_train_fold4, irca_test_fold4]
#
# Test dataset
dataset = test_set
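Each dataset entry is a (volume id, image path, segmentation path) tuple pointing at .npy arrays, so a minimal loading sketch for one entry (illustrative, not part of the original config) is:

import numpy as np

vol_id, img_path, seg_path = dataset[0]
imgvol = np.load(img_path)  # raw CT volume in Hounsfield units
segvol = np.load(seg_path)  # labels: 0 = tissue/background, 1 = liver, 2 = lesion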

CRF Optimization

#! /usr/bin/env python
import numpy as np
import logging
import config
from denseinference import CRFProcessor
from multiprocessing import Pool, Manager
import nibabel as nib
import scipy.misc
import os
import medpy.metric

# global list for volumes
volumes = []

# best results so far
best_dice = -1
best_params = None

n_iterations = 0

IMG_DTYPE = np.float
SEG_DTYPE = np.uint8


def to_scale(img, shape=None):
    if shape is None:
        shape = config.slice_shape
    height, width = shape
    if img.dtype == SEG_DTYPE:
        return scipy.misc.imresize(img, (height, width), interp="nearest").astype(SEG_DTYPE)
    elif img.dtype == IMG_DTYPE:
        factor = 256.0 / np.max(img)
        return (scipy.misc.imresize(img, (height, width), interp="nearest") / factor).astype(IMG_DTYPE)
    else:
        raise TypeError('Error. To scale the image array, its type must be np.uint8 or np.float64. (' + str(img.dtype) + ')')


def norm_hounsfield_dyn(arr, c_min=0.1, c_max=0.3):
    """ Converts from hounsfield units to float64 image with range 0.0 to 1.0 """
    # calc min and max
    min, max = np.amin(arr), np.amax(arr)
    if min <= 0:
        arr = np.clip(arr, min * c_min, max * c_max)
        # right shift to zero
        arr = np.abs(min * c_min) + arr
    else:
        arr = np.clip(arr, min, max * c_max)
        # left shift to zero
        arr = arr - min
    # normalization
    norm_fac = np.amax(arr)
    if norm_fac != 0:
        norm = np.divide(np.multiply(arr, 255), np.amax(arr))
    else:  # don't divide through 0
        norm = np.multiply(arr, 255)
    norm = np.clip(np.multiply(norm, 0.00390625), 0, 1)
    return norm


def histeq_processor(img):
    """ Histogram equalization """
    nbr_bins = 256
    # get image histogram
    imhist, bins = np.histogram(img.flatten(), nbr_bins, normed=True)
    cdf = imhist.cumsum()  # cumulative distribution function
    cdf = 255 * cdf / cdf[-1]  # normalize
    # use linear interpolation of cdf to find new pixel values
    original_shape = img.shape
    img = np.interp(img.flatten(), bins[:-1], cdf)
    img = img / 256.0
    return img.reshape(original_shape)


def process_img_label(imgvol, segvol):
    """
    Process a given image volume and its label and return arrays as a new copy
    :param imgvol:
    :param label_vol:
    :return:
    """
    imgvol_downscaled = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2]))
    segvol_downscaled = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2]))
    imgvol[imgvol > 1200] = 0
    for i in range(imgvol.shape[2]):
        # Get the current slice, normalize and downscale
        slice = np.copy(imgvol[:, :, i])
        slice = norm_hounsfield_dyn(slice)
        slice = to_scale(slice, config.slice_shape)
        slice = histeq_processor(slice)
        imgvol_downscaled[:, :, i] = slice
        # downscale the label slice for the crf
        segvol_downscaled[:, :, i] = to_scale(segvol[:, :, i], config.slice_shape)
    return [imgvol_downscaled, segvol_downscaled]


def crf_worker(img, label, probvol, crfsettings):
    """
    Worker function for parallel CRF processing of multiple volumes
    :param img:
    :param label:
    :param probvol:
    :param crfsettings:
    :return: dice
    """
    pro = CRFProcessor.CRF3DProcessor(**crfsettings)
    # print "started crf"
    # print np.min(img), np.max(img)
    result = pro.set_data_and_run(img, probvol)
    # print np.unique(result)
    # print "done with crf"
    _dice = medpy.metric.dc(result == 1, label == config.target_label)
    print "Dice of single volume: " + str(_dice)
    # not sure if that's necessary
    del pro
    return _dice


def run_crf(params, grad):
    """
    :param pos_x_std:
    :param bilateral_x_std:
    :param bilateral_intensity_std:
    :param pos_w:
    :param bilateral_w:
    :return:
    """
    global best_dice, best_params, volumes, n_iterations
    n_iterations += 1
    # NLopt always passes a grad, even for algorithms that don't use the gradient.
    # If grad is not empty, something is wrong.
    # print grad
    pos_x_std, pos_y_std, pos_z_std, \
        bilateral_x_std, bilateral_y_std, bilateral_z_std, bilateral_intensity_std, \
        pos_w, bilateral_w = params

    # logging.info("=======================")
    # logging.info("Running CRF with the following parameters:")
    # logging.info("pos x std: " + str(pos_x_std))
    # logging.info("pos y std: " + str(pos_y_std))
    # logging.info("pos z std: " + str(pos_z_std))
    # logging.info("pos w: " + str(pos_w))
    # logging.info("bilateral x std: " + str(bilateral_x_std))
    # logging.info("bilateral y std: " + str(bilateral_y_std))
    # logging.info("bilateral z std: " + str(bilateral_z_std))
    # logging.info("bilateral intensity std: " + str(bilateral_intensity_std))
    # logging.info("bilateral w: " + str(bilateral_w))

    crfsettings = dict(max_iterations=config.max_iterations,
                       pos_x_std=pos_x_std,
                       pos_y_std=pos_y_std,
                       pos_z_std=pos_z_std,
                       pos_w=pos_w,
                       bilateral_x_std=bilateral_x_std,
                       bilateral_y_std=bilateral_y_std,
                       bilateral_z_std=bilateral_z_std,
                       bilateral_intensity_std=bilateral_intensity_std,
                       bilateral_w=bilateral_w,
                       dynamic_z=config.dynamic_z,
                       ignore_memory=config.ignore_memory)

    # list of dice scores
    dices = []
    # list of async results
    results = []
    pool = Pool(processes=config.N_PROC)

    # start workers
    for img, label, voxelsize, prob in volumes:
        # Normalize z std according to the volume's voxel slice spacing
        copy_crfsettings = dict(crfsettings)
        copy_crfsettings['pos_z_std'] *= voxelsize[2]  # z std grows with larger spacing between slices
        results.append(pool.apply_async(crf_worker, (img, label, prob, copy_crfsettings)))
        # dices.append(crf_worker(img, label, prob, copy_crfsettings))

    # get results
    for p in results:
        dices.append(p.get())
    pool.close()

    dice_average = np.average(dices)

    logging.info("-----------------------")
    logging.info("Iteration: " + str(n_iterations))
    logging.info("Best avg dice was: " + str(best_dice))
    logging.info("   with best params: " + str(best_params))
    logging.info("Current avg dice is: " + str(dice_average))
    logging.info("   with current params: " + str(params))
    logging.info("=======================")

    if dice_average >= best_dice:
        best_params = params
        best_dice = dice_average
        print 'FOUND BETTER PARAMS'

    return dice_average
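run_crf(params, grad) has exactly the signature NLopt expects for an objective (grad stays unused), but the driver that calls it is not shown in this post. A minimal sketch of how it could be wired up, assuming the nlopt Python bindings and a gradient-free algorithm such as BOBYQA:

import nlopt

opt = nlopt.opt(nlopt.LN_BOBYQA, len(config.params_initial))
opt.set_max_objective(run_crf)      # maximize the average Dice returned by run_crf
opt.set_maxeval(config.MAX_N_IT)    # NLopt treats a non-positive maxeval as "no limit"
best = opt.optimize(config.params_initial)

Note that the global `volumes` list must be filled first (e.g., via process_img_label plus the network's probability maps), since run_crf iterates over it, and that bilateral_w is meant to stay fixed during optimization (see the params_initial comments).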

Experiment Data

import logging

# Logging level
log_level = logging.WARNING

# Takes only the first n volumes. Useful to create small datasets fast
max_volumes = -1

# Pre-write processing
# Processors applied to images/segmentations right before persisting them to the database (after augmentation, etc.)
# A processor takes 2 images img and seg, and returns a tuple (img, seg)
# Available processors:
#  - processors.zoomliver_UNET_processor
#  - processors.plain_UNET_processor
#  - processors.histeq_processor
#  - processors.liveronly_label_processor
from numpy_data_layer import processors
processors_list = [processors.plain_UNET_processor]
# Step 1
#processors_list = [processors.histeq_processor, processors.plain_UNET_processor, processors.liveronly_label_processor]
#processors_list = [processors.histeq_processor, processors.plain_UNET_processor][1:]
# Step 2
#processors_list = [processors.remove_non_liver, processors.zoomliver_UNET_processor]
#processors_list = [processors.histeq_processor]

# Shuffle slices and their augmentations globally across the database
# You might want to set this to False if dataset = test_set
shuffle_slices = True

# Augmentation factor
augmentation_factor = 10

# ** Labels order: tissue=0, liver=1, lesion=2
# ** We call a slice a "lesion slice" if the MAX label it has is 2
# slice options: liver-lesion, stat-batch, dyn-batch
#
# liver-only:   Include only slices which are labeled with liver or lower (1 or 0)
# lesion-only:  Include only slices which are labeled with lesion or lower (2, 1 or 0)
# liver-lesion: Include only slices which are labeled with liver or lesion (slices with max=2 or with max=1)
select_slices = "all"
#select_slices = 'liver-lesion'

more_small_livers = False
# Percentage of the image, such that any liver smaller than that is considered small
small_liver_percent = 2

decrease_empty_slices = 0.9
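# Note (sketch mirroring NumpyDataLayer.prepare_slice in numpy_data_layer.py
# below; not part of this config): each processor maps (img, seg) -> (img, seg)
# and the configured list is applied as a simple fold:
#   for processor in processors_list:
#       img, seg = processor(img, seg)
# Also, augmentation_factor = 10 matches the ten entries of aug_func in
# numpy_data_layer.py, since it bounds the random aug_idx.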
# data=[
# (49, '/home/guest/training/volume-49.nii', '/home/guest/training/segmentation-49.nii') ,
# (42, '/home/guest/training/volume-42.nii', '/home/guest/training/segmentation-42.nii') ,
# (23, '/home/guest/training/volume-23.nii', '/home/guest/training/segmentation-23.nii') ,
# (26, '/home/guest/training/volume-26.nii', '/home/guest/training/segmentation-26.nii') ,
# (37, '/home/guest/training/volume-37.nii', '/home/guest/training/segmentation-37.nii') ,
# (46, '/home/guest/training/volume-46.nii', '/home/guest/training/segmentation-46.nii') ,
# (2, '/home/guest/training/volume-2.nii', '/home/guest/training/segmentation-2.nii') ,
# (24, '/home/guest/training/volume-24.nii', '/home/guest/training/segmentation-24.nii') ,
# (44, '/home/guest/training/volume-44.nii', '/home/guest/training/segmentation-44.nii') ,
# (6, '/home/guest/training/volume-6.nii', '/home/guest/training/segmentation-6.nii') ,
# (25, '/home/guest/training/volume-25.nii', '/home/guest/training/segmentation-25.nii') ,
# (18, '/home/guest/training/volume-18.nii', '/home/guest/training/segmentation-18.nii') ,
# (16, '/home/guest/training/volume-16.nii', '/home/guest/training/segmentation-16.nii') ,
# (60, '/home/guest/training/volume-60.nii', '/home/guest/training/segmentation-60.nii') ,
# (59, '/home/guest/training/volume-59.nii', '/home/guest/training/segmentation-59.nii') ,
# (33, '/home/guest/training/volume-33.nii', '/home/guest/training/segmentation-33.nii') ,
# (58, '/home/guest/training/volume-58.nii', '/home/guest/training/segmentation-58.nii') ,
# (31, '/home/guest/training/volume-31.nii', '/home/guest/training/segmentation-31.nii') ,
# (54, '/home/guest/training/volume-54.nii', '/home/guest/training/segmentation-54.nii') ,
# (52, '/home/guest/training/volume-52.nii', '/home/guest/training/segmentation-52.nii') ,
# (12, '/home/guest/training/volume-12.nii', '/home/guest/training/segmentation-12.nii') ,
# (41, '/home/guest/training/volume-41.nii', '/home/guest/training/segmentation-41.nii') ,
# (56, '/home/guest/training/volume-56.nii', '/home/guest/training/segmentation-56.nii') ,
# (14, '/home/guest/training/volume-14.nii', '/home/guest/training/segmentation-14.nii') ,
# (4, '/home/guest/training/volume-4.nii', '/home/guest/training/segmentation-4.nii') ,
# (51, '/home/guest/training/volume-51.nii', '/home/guest/training/segmentation-51.nii') ,
# (47, '/home/guest/training/volume-47.nii', '/home/guest/training/segmentation-47.nii') ,
# (38, '/home/guest/training/volume-38.nii', '/home/guest/training/segmentation-38.nii') ,
# (34, '/home/guest/training/volume-34.nii', '/home/guest/training/segmentation-34.nii') ,
# (19, '/home/guest/training/volume-19.nii', '/home/guest/training/segmentation-19.nii') ,
# (43, '/home/guest/training/volume-43.nii', '/home/guest/training/segmentation-43.nii') ,
# (9, '/home/guest/training/volume-9.nii', '/home/guest/training/segmentation-9.nii') ,
# (15, '/home/guest/training/volume-15.nii', '/home/guest/training/segmentation-15.nii') ,
# (39, '/home/guest/training/volume-39.nii', '/home/guest/training/segmentation-39.nii') ,
# (20, '/home/guest/training/volume-20.nii', '/home/guest/training/segmentation-20.nii') ,
# (17, '/home/guest/training/volume-17.nii', '/home/guest/training/segmentation-17.nii') ,
# (55, '/home/guest/training/volume-55.nii', '/home/guest/training/segmentation-55.nii') ,
# (30, '/home/guest/training/volume-30.nii', '/home/guest/training/segmentation-30.nii') ,
# (29, '/home/guest/training/volume-29.nii', '/home/guest/training/segmentation-29.nii') ,
# (7, '/home/guest/training/volume-7.nii', '/home/guest/training/segmentation-7.nii') ,
# (22, '/home/guest/training/volume-22.nii', '/home/guest/training/segmentation-22.nii') ,
# (8, '/home/guest/training/volume-8.nii', '/home/guest/training/segmentation-8.nii') ,
# (13, '/home/guest/training/volume-13.nii', '/home/guest/training/segmentation-13.nii') ,
# (40, '/home/guest/training/volume-40.nii', '/home/guest/training/segmentation-40.nii') ,
# (0, '/home/guest/training/volume-0.nii', '/home/guest/training/segmentation-0.nii') ,
# (53, '/home/guest/training/volume-53.nii', '/home/guest/training/segmentation-53.nii') ,
# (5, '/home/guest/training/volume-5.nii', '/home/guest/training/segmentation-5.nii') ,
# (1, '/home/guest/training/volume-1.nii', '/home/guest/training/segmentation-1.nii') ,
# (36, '/home/guest/training/volume-36.nii', '/home/guest/training/segmentation-36.nii') ,
# (10, '/home/guest/training/volume-10.nii', '/home/guest/training/segmentation-10.nii') ,
# (48, '/home/guest/training/volume-48.nii', '/home/guest/training/segmentation-48.nii') ,
# (28, '/home/guest/training/volume-28.nii', '/home/guest/training/segmentation-28.nii') ,
# (11, '/home/guest/training/volume-11.nii', '/home/guest/training/segmentation-11.nii') ,
# (50, '/home/guest/training/volume-50.nii', '/home/guest/training/segmentation-50.nii') ,
# (45, '/home/guest/training/volume-45.nii', '/home/guest/training/segmentation-45.nii') ,
# (3, '/home/guest/training/volume-3.nii', '/home/guest/training/segmentation-3.nii') ,
# (57, '/home/guest/training/volume-57.nii', '/home/guest/training/segmentation-57.nii') ,
# (35, '/home/guest/training/volume-35.nii', '/home/guest/training/segmentation-35.nii') ,
# (32, '/home/guest/training/volume-32.nii', '/home/guest/training/segmentation-32.nii') ,
# (21, '/home/guest/training/volume-21.nii', '/home/guest/training/segmentation-21.nii') ,
# (27, '/home/guest/training/volume-27.nii', '/home/guest/training/segmentation-27.nii')]
data=[
(35, '/home/guest/training/volume-35.npy', '/home/guest/training/segmentation-35.npy') ,
(127, '/home/guest/training/volume-127.npy', '/home/guest/training/segmentation-127.npy') ,
(122, '/home/guest/training/volume-122.npy', '/home/guest/training/segmentation-122.npy') ,
(83, '/home/guest/training/volume-83.npy', '/home/guest/training/segmentation-83.npy') ,
(123, '/home/guest/training/volume-123.npy', '/home/guest/training/segmentation-123.npy') ,
(93, '/home/guest/training/volume-93.npy', '/home/guest/training/segmentation-93.npy') ,
(108, '/home/guest/training/volume-108.npy', '/home/guest/training/segmentation-108.npy') ,
(98, '/home/guest/training/volume-98.npy', '/home/guest/training/segmentation-98.npy') ,
(46, '/home/guest/training/volume-46.npy', '/home/guest/training/segmentation-46.npy') ,
(51, '/home/guest/training/volume-51.npy', '/home/guest/training/segmentation-51.npy') ,
(19, '/home/guest/training/volume-19.npy', '/home/guest/training/segmentation-19.npy') ,
(62, '/home/guest/training/volume-62.npy', '/home/guest/training/segmentation-62.npy') ,
(120, '/home/guest/training/volume-120.npy', '/home/guest/training/segmentation-120.npy') ,
(87, '/home/guest/training/volume-87.npy', '/home/guest/training/segmentation-87.npy') ,
(7, '/home/guest/training/volume-7.npy', '/home/guest/training/segmentation-7.npy') ,
(54, '/home/guest/training/volume-54.npy', '/home/guest/training/segmentation-54.npy') ,
(102, '/home/guest/training/volume-102.npy', '/home/guest/training/segmentation-102.npy') ,
(105, '/home/guest/training/volume-105.npy', '/home/guest/training/segmentation-105.npy') ,
(81, '/home/guest/training/volume-81.npy', '/home/guest/training/segmentation-81.npy') ,
(97, '/home/guest/training/volume-97.npy', '/home/guest/training/segmentation-97.npy') ,
(88, '/home/guest/training/volume-88.npy', '/home/guest/training/segmentation-88.npy') ,
(39, '/home/guest/training/volume-39.npy', '/home/guest/training/segmentation-39.npy') ,
(1, '/home/guest/training/volume-1.npy', '/home/guest/training/segmentation-1.npy') ,
(124, '/home/guest/training/volume-124.npy', '/home/guest/training/segmentation-124.npy') ,
(34, '/home/guest/training/volume-34.npy', '/home/guest/training/segmentation-34.npy') ,
(31, '/home/guest/training/volume-31.npy', '/home/guest/training/segmentation-31.npy') ,
(42, '/home/guest/training/volume-42.npy', '/home/guest/training/segmentation-42.npy') ,
(13, '/home/guest/training/volume-13.npy', '/home/guest/training/segmentation-13.npy') ,
(107, '/home/guest/training/volume-107.npy', '/home/guest/training/segmentation-107.npy') ,
(112, '/home/guest/training/volume-112.npy', '/home/guest/training/segmentation-112.npy') ,
(92, '/home/guest/training/volume-92.npy', '/home/guest/training/segmentation-92.npy') ,
(110, '/home/guest/training/volume-110.npy', '/home/guest/training/segmentation-110.npy') ,
(8, '/home/guest/training/volume-8.npy', '/home/guest/training/segmentation-8.npy') ,
(72, '/home/guest/training/volume-72.npy', '/home/guest/training/segmentation-72.npy') ,
(56, '/home/guest/training/volume-56.npy', '/home/guest/training/segmentation-56.npy') ,
(115, '/home/guest/training/volume-115.npy', '/home/guest/training/segmentation-115.npy') ,
(57, '/home/guest/training/volume-57.npy', '/home/guest/training/segmentation-57.npy') ,
(109, '/home/guest/training/volume-109.npy', '/home/guest/training/segmentation-109.npy') ,
(118, '/home/guest/training/volume-118.npy', '/home/guest/training/segmentation-118.npy') ,
(90, '/home/guest/training/volume-90.npy', '/home/guest/training/segmentation-90.npy') ,
(76, '/home/guest/training/volume-76.npy', '/home/guest/training/segmentation-76.npy') ,
(68, '/home/guest/training/volume-68.npy', '/home/guest/training/segmentation-68.npy') ,
(119, '/home/guest/training/volume-119.npy', '/home/guest/training/segmentation-119.npy') ,
(58, '/home/guest/training/volume-58.npy', '/home/guest/training/segmentation-58.npy') ,
(73, '/home/guest/training/volume-73.npy', '/home/guest/training/segmentation-73.npy') ,
(116, '/home/guest/training/volume-116.npy', '/home/guest/training/segmentation-116.npy') ,
(47, '/home/guest/training/volume-47.npy', '/home/guest/training/segmentation-47.npy') ,
(66, '/home/guest/training/volume-66.npy', '/home/guest/training/segmentation-66.npy') ,
(94, '/home/guest/training/volume-94.npy', '/home/guest/training/segmentation-94.npy') ,
(38, '/home/guest/training/volume-38.npy', '/home/guest/training/segmentation-38.npy') ,
(130, '/home/guest/training/volume-130.npy', '/home/guest/training/segmentation-130.npy') ,
(71, '/home/guest/training/volume-71.npy', '/home/guest/training/segmentation-71.npy') ,
(20, '/home/guest/training/volume-20.npy', '/home/guest/training/segmentation-20.npy') ,
(48, '/home/guest/training/volume-48.npy', '/home/guest/training/segmentation-48.npy') ,
(21, '/home/guest/training/volume-21.npy', '/home/guest/training/segmentation-21.npy') ,
(63, '/home/guest/training/volume-63.npy', '/home/guest/training/segmentation-63.npy') ,
(3, '/home/guest/training/volume-3.npy', '/home/guest/training/segmentation-3.npy') ,
(22, '/home/guest/training/volume-22.npy', '/home/guest/training/segmentation-22.npy') ,
(96, '/home/guest/training/volume-96.npy', '/home/guest/training/segmentation-96.npy') ,
(4, '/home/guest/training/volume-4.npy', '/home/guest/training/segmentation-4.npy') ,
(111, '/home/guest/training/volume-111.npy', '/home/guest/training/segmentation-111.npy') ,
(32, '/home/guest/training/volume-32.npy', '/home/guest/training/segmentation-32.npy') ,
(104, '/home/guest/training/volume-104.npy', '/home/guest/training/segmentation-104.npy') ,
(9, '/home/guest/training/volume-9.npy', '/home/guest/training/segmentation-9.npy') ,
(29, '/home/guest/training/volume-29.npy', '/home/guest/training/segmentation-29.npy') ,
(100, '/home/guest/training/volume-100.npy', '/home/guest/training/segmentation-100.npy') ,
(70, '/home/guest/training/volume-70.npy', '/home/guest/training/segmentation-70.npy') ,
(36, '/home/guest/training/volume-36.npy', '/home/guest/training/segmentation-36.npy') ,
(43, '/home/guest/training/volume-43.npy', '/home/guest/training/segmentation-43.npy') ,
(99, '/home/guest/training/volume-99.npy', '/home/guest/training/segmentation-99.npy') ,
(24, '/home/guest/training/volume-24.npy', '/home/guest/training/segmentation-24.npy') ,
(15, '/home/guest/training/volume-15.npy', '/home/guest/training/segmentation-15.npy') ,
(95, '/home/guest/training/volume-95.npy', '/home/guest/training/segmentation-95.npy') ,
(128, '/home/guest/training/volume-128.npy', '/home/guest/training/segmentation-128.npy') ,
(60, '/home/guest/training/volume-60.npy', '/home/guest/training/segmentation-60.npy') ,
(33, '/home/guest/training/volume-33.npy', '/home/guest/training/segmentation-33.npy') ,
(86, '/home/guest/training/volume-86.npy', '/home/guest/training/segmentation-86.npy') ,
(0, '/home/guest/training/volume-0.npy', '/home/guest/training/segmentation-0.npy') ,
(50, '/home/guest/training/volume-50.npy', '/home/guest/training/segmentation-50.npy') ,
(17, '/home/guest/training/volume-17.npy', '/home/guest/training/segmentation-17.npy') ,
(49, '/home/guest/training/volume-49.npy', '/home/guest/training/segmentation-49.npy') ,
(16, '/home/guest/training/volume-16.npy', '/home/guest/training/segmentation-16.npy') ,
(10, '/home/guest/training/volume-10.npy', '/home/guest/training/segmentation-10.npy') ,
(52, '/home/guest/training/volume-52.npy', '/home/guest/training/segmentation-52.npy') ,
(75, '/home/guest/training/volume-75.npy', '/home/guest/training/segmentation-75.npy') ,
(23, '/home/guest/training/volume-23.npy', '/home/guest/training/segmentation-23.npy') ,
(67, '/home/guest/training/volume-67.npy', '/home/guest/training/segmentation-67.npy') ,
(113, '/home/guest/training/volume-113.npy', '/home/guest/training/segmentation-113.npy') ,
(14, '/home/guest/training/volume-14.npy', '/home/guest/training/segmentation-14.npy') ,
(30, '/home/guest/training/volume-30.npy', '/home/guest/training/segmentation-30.npy') ,
(2, '/home/guest/training/volume-2.npy', '/home/guest/training/segmentation-2.npy') ,
(82, '/home/guest/training/volume-82.npy', '/home/guest/training/segmentation-82.npy') ,
(74, '/home/guest/training/volume-74.npy', '/home/guest/training/segmentation-74.npy') ,
(125, '/home/guest/training/volume-125.npy', '/home/guest/training/segmentation-125.npy') ,
(11, '/home/guest/training/volume-11.npy', '/home/guest/training/segmentation-11.npy') ,
(89, '/home/guest/training/volume-89.npy', '/home/guest/training/segmentation-89.npy') ,
(78, '/home/guest/training/volume-78.npy', '/home/guest/training/segmentation-78.npy') ,
(64, '/home/guest/training/volume-64.npy', '/home/guest/training/segmentation-64.npy') ,
(126, '/home/guest/training/volume-126.npy', '/home/guest/training/segmentation-126.npy') ,
(129, '/home/guest/training/volume-129.npy', '/home/guest/training/segmentation-129.npy') ,
(114, '/home/guest/training/volume-114.npy', '/home/guest/training/segmentation-114.npy') ,
(37, '/home/guest/training/volume-37.npy', '/home/guest/training/segmentation-37.npy') ,
(25, '/home/guest/training/volume-25.npy', '/home/guest/training/segmentation-25.npy') ,
(85, '/home/guest/training/volume-85.npy', '/home/guest/training/segmentation-85.npy') ,
(80, '/home/guest/training/volume-80.npy', '/home/guest/training/segmentation-80.npy') ,
(27, '/home/guest/training/volume-27.npy', '/home/guest/training/segmentation-27.npy') ,
(18, '/home/guest/training/volume-18.npy', '/home/guest/training/segmentation-18.npy') ,
(69, '/home/guest/training/volume-69.npy', '/home/guest/training/segmentation-69.npy') ,
(40, '/home/guest/training/volume-40.npy', '/home/guest/training/segmentation-40.npy') ,
(61, '/home/guest/training/volume-61.npy', '/home/guest/training/segmentation-61.npy') ,
(117, '/home/guest/training/volume-117.npy', '/home/guest/training/segmentation-117.npy') ,
(44, '/home/guest/training/volume-44.npy', '/home/guest/training/segmentation-44.npy') ,
(26, '/home/guest/training/volume-26.npy', '/home/guest/training/segmentation-26.npy') ,
(91, '/home/guest/training/volume-91.npy', '/home/guest/training/segmentation-91.npy') ,
(65, '/home/guest/training/volume-65.npy', '/home/guest/training/segmentation-65.npy') ,
(55, '/home/guest/training/volume-55.npy', '/home/guest/training/segmentation-55.npy') ,
(5, '/home/guest/training/volume-5.npy', '/home/guest/training/segmentation-5.npy') ,
(77, '/home/guest/training/volume-77.npy', '/home/guest/training/segmentation-77.npy') ,
(12, '/home/guest/training/volume-12.npy', '/home/guest/training/segmentation-12.npy') ,
(28, '/home/guest/training/volume-28.npy', '/home/guest/training/segmentation-28.npy') ,
(6, '/home/guest/training/volume-6.npy', '/home/guest/training/segmentation-6.npy') ,
(79, '/home/guest/training/volume-79.npy', '/home/guest/training/segmentation-79.npy') ,
(84, '/home/guest/training/volume-84.npy', '/home/guest/training/segmentation-84.npy') ,
(103, '/home/guest/training/volume-103.npy', '/home/guest/training/segmentation-103.npy') ,
(101, '/home/guest/training/volume-101.npy', '/home/guest/training/segmentation-101.npy') ,
(106, '/home/guest/training/volume-106.npy', '/home/guest/training/segmentation-106.npy') ,
(59, '/home/guest/training/volume-59.npy', '/home/guest/training/segmentation-59.npy') ,
(45, '/home/guest/training/volume-45.npy', '/home/guest/training/segmentation-45.npy') ,
(53, '/home/guest/training/volume-53.npy', '/home/guest/training/segmentation-53.npy') ,
(41, '/home/guest/training/volume-41.npy', '/home/guest/training/segmentation-41.npy') ,
(121, '/home/guest/training/volume-121.npy', '/home/guest/training/segmentation-121.npy')]

train_set=[
(35, '/home/guest/training/volume-35.npy', '/home/guest/training/segmentation-35.npy') ,
(127, '/home/guest/training/volume-127.npy', '/home/guest/training/segmentation-127.npy') ,
(122, '/home/guest/training/volume-122.npy', '/home/guest/training/segmentation-122.npy') ,
(83, '/home/guest/training/volume-83.npy', '/home/guest/training/segmentation-83.npy') ,
(123, '/home/guest/training/volume-123.npy', '/home/guest/training/segmentation-123.npy') ,
(93, '/home/guest/training/volume-93.npy', '/home/guest/training/segmentation-93.npy') ,
(108, '/home/guest/training/volume-108.npy', '/home/guest/training/segmentation-108.npy') ,
(98, '/home/guest/training/volume-98.npy', '/home/guest/training/segmentation-98.npy') ,
(46, '/home/guest/training/volume-46.npy', '/home/guest/training/segmentation-46.npy') ,
(51, '/home/guest/training/volume-51.npy', '/home/guest/training/segmentation-51.npy') ,
(19, '/home/guest/training/volume-19.npy', '/home/guest/training/segmentation-19.npy') ,
(62, '/home/guest/training/volume-62.npy', '/home/guest/training/segmentation-62.npy') ,
(120, '/home/guest/training/volume-120.npy', '/home/guest/training/segmentation-120.npy') ,
(87, '/home/guest/training/volume-87.npy', '/home/guest/training/segmentation-87.npy') ,
(7, '/home/guest/training/volume-7.npy', '/home/guest/training/segmentation-7.npy') ,
(54, '/home/guest/training/volume-54.npy', '/home/guest/training/segmentation-54.npy') ,
(102, '/home/guest/training/volume-102.npy', '/home/guest/training/segmentation-102.npy') ,
(105, '/home/guest/training/volume-105.npy', '/home/guest/training/segmentation-105.npy') ,
(81, '/home/guest/training/volume-81.npy', '/home/guest/training/segmentation-81.npy') ,
(97, '/home/guest/training/volume-97.npy', '/home/guest/training/segmentation-97.npy') ,
(88, '/home/guest/training/volume-88.npy', '/home/guest/training/segmentation-88.npy') ,
(39, '/home/guest/training/volume-39.npy', '/home/guest/training/segmentation-39.npy') ,
(1, '/home/guest/training/volume-1.npy', '/home/guest/training/segmentation-1.npy') ,
(124, '/home/guest/training/volume-124.npy', '/home/guest/training/segmentation-124.npy') ,
(34, '/home/guest/training/volume-34.npy', '/home/guest/training/segmentation-34.npy') ,
(31, '/home/guest/training/volume-31.npy', '/home/guest/training/segmentation-31.npy') ,
(42, '/home/guest/training/volume-42.npy', '/home/guest/training/segmentation-42.npy') ,
(13, '/home/guest/training/volume-13.npy', '/home/guest/training/segmentation-13.npy') ,
(107, '/home/guest/training/volume-107.npy', '/home/guest/training/segmentation-107.npy') ,
(112, '/home/guest/training/volume-112.npy', '/home/guest/training/segmentation-112.npy') ,
(92, '/home/guest/training/volume-92.npy', '/home/guest/training/segmentation-92.npy') ,
(110, '/home/guest/training/volume-110.npy', '/home/guest/training/segmentation-110.npy') ,
(8, '/home/guest/training/volume-8.npy', '/home/guest/training/segmentation-8.npy') ,
(72, '/home/guest/training/volume-72.npy', '/home/guest/training/segmentation-72.npy') ,
(56, '/home/guest/training/volume-56.npy', '/home/guest/training/segmentation-56.npy') ,
(115, '/home/guest/training/volume-115.npy', '/home/guest/training/segmentation-115.npy') ,
(57, '/home/guest/training/volume-57.npy', '/home/guest/training/segmentation-57.npy') ,
(109, '/home/guest/training/volume-109.npy', '/home/guest/training/segmentation-109.npy') ,
(118, '/home/guest/training/volume-118.npy', '/home/guest/training/segmentation-118.npy') ,
(90, '/home/guest/training/volume-90.npy', '/home/guest/training/segmentation-90.npy') ,
(76, '/home/guest/training/volume-76.npy', '/home/guest/training/segmentation-76.npy') ,
(68, '/home/guest/training/volume-68.npy', '/home/guest/training/segmentation-68.npy') ,
(119, '/home/guest/training/volume-119.npy', '/home/guest/training/segmentation-119.npy') ,
(58, '/home/guest/training/volume-58.npy', '/home/guest/training/segmentation-58.npy') ,
(73, '/home/guest/training/volume-73.npy', '/home/guest/training/segmentation-73.npy') ,
(116, '/home/guest/training/volume-116.npy', '/home/guest/training/segmentation-116.npy') ,
(47, '/home/guest/training/volume-47.npy', '/home/guest/training/segmentation-47.npy') ,
(66, '/home/guest/training/volume-66.npy', '/home/guest/training/segmentation-66.npy') ,
(94, '/home/guest/training/volume-94.npy', '/home/guest/training/segmentation-94.npy') ,
(38, '/home/guest/training/volume-38.npy', '/home/guest/training/segmentation-38.npy') ,
(130, '/home/guest/training/volume-130.npy', '/home/guest/training/segmentation-130.npy') ,
(71, '/home/guest/training/volume-71.npy', '/home/guest/training/segmentation-71.npy') ,
(20, '/home/guest/training/volume-20.npy', '/home/guest/training/segmentation-20.npy') ,
(48, '/home/guest/training/volume-48.npy', '/home/guest/training/segmentation-48.npy') ,
(21, '/home/guest/training/volume-21.npy', '/home/guest/training/segmentation-21.npy') ,
(63, '/home/guest/training/volume-63.npy', '/home/guest/training/segmentation-63.npy') ,
(3, '/home/guest/training/volume-3.npy', '/home/guest/training/segmentation-3.npy') ,
(22, '/home/guest/training/volume-22.npy', '/home/guest/training/segmentation-22.npy') ,
(96, '/home/guest/training/volume-96.npy', '/home/guest/training/segmentation-96.npy') ,
(4, '/home/guest/training/volume-4.npy', '/home/guest/training/segmentation-4.npy') ,
(111, '/home/guest/training/volume-111.npy', '/home/guest/training/segmentation-111.npy') ,
(32, '/home/guest/training/volume-32.npy', '/home/guest/training/segmentation-32.npy') ,
(104, '/home/guest/training/volume-104.npy', '/home/guest/training/segmentation-104.npy') ,
(9, '/home/guest/training/volume-9.npy', '/home/guest/training/segmentation-9.npy') ,
(29, '/home/guest/training/volume-29.npy', '/home/guest/training/segmentation-29.npy') ,
(100, '/home/guest/training/volume-100.npy', '/home/guest/training/segmentation-100.npy') ,
(70, '/home/guest/training/volume-70.npy', '/home/guest/training/segmentation-70.npy') ,
(36, '/home/guest/training/volume-36.npy', '/home/guest/training/segmentation-36.npy') ,
(43, '/home/guest/training/volume-43.npy', '/home/guest/training/segmentation-43.npy') ,
(99, '/home/guest/training/volume-99.npy', '/home/guest/training/segmentation-99.npy') ,
(24, '/home/guest/training/volume-24.npy', '/home/guest/training/segmentation-24.npy') ,
(15, '/home/guest/training/volume-15.npy', '/home/guest/training/segmentation-15.npy') ,
(95, '/home/guest/training/volume-95.npy', '/home/guest/training/segmentation-95.npy') ,
(128, '/home/guest/training/volume-128.npy', '/home/guest/training/segmentation-128.npy') ,
(60, '/home/guest/training/volume-60.npy', '/home/guest/training/segmentation-60.npy') ,
(33, '/home/guest/training/volume-33.npy', '/home/guest/training/segmentation-33.npy') ,
(86, '/home/guest/training/volume-86.npy', '/home/guest/training/segmentation-86.npy') ,
(0, '/home/guest/training/volume-0.npy', '/home/guest/training/segmentation-0.npy') ,
(50, '/home/guest/training/volume-50.npy', '/home/guest/training/segmentation-50.npy') ,
(17, '/home/guest/training/volume-17.npy', '/home/guest/training/segmentation-17.npy') ,
(49, '/home/guest/training/volume-49.npy', '/home/guest/training/segmentation-49.npy') ,
(16, '/home/guest/training/volume-16.npy', '/home/guest/training/segmentation-16.npy') ,
(10, '/home/guest/training/volume-10.npy', '/home/guest/training/segmentation-10.npy') ,
(52, '/home/guest/training/volume-52.npy', '/home/guest/training/segmentation-52.npy') ,
(75, '/home/guest/training/volume-75.npy', '/home/guest/training/segmentation-75.npy') ,
(23, '/home/guest/training/volume-23.npy', '/home/guest/training/segmentation-23.npy') ,
(67, '/home/guest/training/volume-67.npy', '/home/guest/training/segmentation-67.npy') ,
(113, '/home/guest/training/volume-113.npy', '/home/guest/training/segmentation-113.npy') ,
(14, '/home/guest/training/volume-14.npy', '/home/guest/training/segmentation-14.npy') ,
(30, '/home/guest/training/volume-30.npy', '/home/guest/training/segmentation-30.npy') ,
(2, '/home/guest/training/volume-2.npy', '/home/guest/training/segmentation-2.npy') ]

test_set=[
(82, '/home/guest/training/volume-82.npy', '/home/guest/training/segmentation-82.npy') ,
(74, '/home/guest/training/volume-74.npy', '/home/guest/training/segmentation-74.npy') ,
(125, '/home/guest/training/volume-125.npy', '/home/guest/training/segmentation-125.npy') ,
(11, '/home/guest/training/volume-11.npy', '/home/guest/training/segmentation-11.npy') ,
(89, '/home/guest/training/volume-89.npy', '/home/guest/training/segmentation-89.npy') ,
(78, '/home/guest/training/volume-78.npy', '/home/guest/training/segmentation-78.npy') ,
(64, '/home/guest/training/volume-64.npy', '/home/guest/training/segmentation-64.npy') ,
(126, '/home/guest/training/volume-126.npy', '/home/guest/training/segmentation-126.npy') ,
(129, '/home/guest/training/volume-129.npy', '/home/guest/training/segmentation-129.npy') ,
(114, '/home/guest/training/volume-114.npy', '/home/guest/training/segmentation-114.npy') ,
(37, '/home/guest/training/volume-37.npy', '/home/guest/training/segmentation-37.npy') ,
(25, '/home/guest/training/volume-25.npy', '/home/guest/training/segmentation-25.npy') ,
(85, '/home/guest/training/volume-85.npy', '/home/guest/training/segmentation-85.npy') ,
(80, '/home/guest/training/volume-80.npy', '/home/guest/training/segmentation-80.npy') ,
(27, '/home/guest/training/volume-27.npy', '/home/guest/training/segmentation-27.npy') ,
(18, '/home/guest/training/volume-18.npy', '/home/guest/training/segmentation-18.npy') ,
(69, '/home/guest/training/volume-69.npy', '/home/guest/training/segmentation-69.npy') ,
(40, '/home/guest/training/volume-40.npy', '/home/guest/training/segmentation-40.npy') ,
(61, '/home/guest/training/volume-61.npy', '/home/guest/training/segmentation-61.npy') ,
(117, '/home/guest/training/volume-117.npy', '/home/guest/training/segmentation-117.npy') ,
(44, '/home/guest/training/volume-44.npy', '/home/guest/training/segmentation-44.npy') ,
(26, '/home/guest/training/volume-26.npy', '/home/guest/training/segmentation-26.npy') ,
(91, '/home/guest/training/volume-91.npy', '/home/guest/training/segmentation-91.npy') ,
(65, '/home/guest/training/volume-65.npy', '/home/guest/training/segmentation-65.npy') ,
(55, '/home/guest/training/volume-55.npy', '/home/guest/training/segmentation-55.npy') ,
(5, '/home/guest/training/volume-5.npy', '/home/guest/training/segmentation-5.npy') ,
(77, '/home/guest/training/volume-77.npy', '/home/guest/training/segmentation-77.npy') ,
(12, '/home/guest/training/volume-12.npy', '/home/guest/training/segmentation-12.npy') ,
(28, '/home/guest/training/volume-28.npy', '/home/guest/training/segmentation-28.npy') ,
(6, '/home/guest/training/volume-6.npy', '/home/guest/training/segmentation-6.npy') ,
(79, '/home/guest/training/volume-79.npy', '/home/guest/training/segmentation-79.npy') ,
(84, '/home/guest/training/volume-84.npy', '/home/guest/training/segmentation-84.npy') ,
(103, '/home/guest/training/volume-103.npy', '/home/guest/training/segmentation-103.npy') ,
(101, '/home/guest/training/volume-101.npy', '/home/guest/training/segmentation-101.npy') ,
(106, '/home/guest/training/volume-106.npy', '/home/guest/training/segmentation-106.npy') ,
(59, '/home/guest/training/volume-59.npy', '/home/guest/training/segmentation-59.npy') ,
(45, '/home/guest/training/volume-45.npy', '/home/guest/training/segmentation-45.npy') ,
(53, '/home/guest/training/volume-53.npy', '/home/guest/training/segmentation-53.npy') ,
(41, '/home/guest/training/volume-41.npy', '/home/guest/training/segmentation-41.npy') ,
(121, '/home/guest/training/volume-121.npy', '/home/guest/training/segmentation-121.npy')]

# Select network datasets
#train_dataset = irca_numpy_all[:10]
#test_dataset = irca_numpy_all[10:]
train_dataset = train_set  #fire3_train_set
test_dataset = test_set
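# These two names are exactly what the Caffe data layers below read:
# NumpyTrainDataLayer uses config.train_dataset and NumpyTestDataLayer uses
# config.test_dataset (see numpy_data_layer.py in the next section).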

Experiment Data 2: numpy_data_layer.py

'''
Created on Apr 6, 2016
@author: Mohamed.Ezz

This module includes Caffe python data layers to read volumes directly from Npy files (3D CT volumes).
The layer scales well with large amounts of data, and supports prefetching for minimal processing overhead.
'''
import sys, os, time, random, shutil
import numpy as np
import lmdb, caffe, nibabel
from multiprocessing import Pool, Process, Queue
from Queue import Empty, Full
import scipy.misc, scipy.ndimage.interpolation
from tqdm import tqdm
import plyvel
from itertools import izip
import logging
from contextlib import closing

## Deformation Augmentation
from skimage.transform import PiecewiseAffineTransform, warp

IMG_DTYPE = np.float
SEG_DTYPE = np.uint8

# Prefetching queue
MAX_QUEUE_SIZE = 1000
PREFETCH_BATCH_SIZE = 100


def maybe_true(probability=0.5):
    rnd = random.random()
    return rnd <= probability


def to_scale(img, shape=None):
    if shape is None:
        shape = config.slice_shape
    height, width = shape
    if img.dtype == SEG_DTYPE:
        return scipy.misc.imresize(img, (height, width), interp="nearest").astype(SEG_DTYPE)
    elif img.dtype == IMG_DTYPE:
        factor = 256.0 / np.max(img)
        return (scipy.misc.imresize(img, (height, width), interp="nearest") / factor).astype(IMG_DTYPE)
    else:
        raise TypeError('Error. To scale the image array, its type must be np.uint8 or np.float64. (' + str(img.dtype) + ')')


def norm_hounsfield_dyn(arr, c_min=0.1, c_max=0.3):
    """ Converts from hounsfield units to float64 image with range 0.0 to 1.0 """
    # calc min and max
    min, max = np.amin(arr), np.amax(arr)
    arr = arr.astype(IMG_DTYPE)
    if min <= 0:
        arr = np.clip(arr, min * c_min, max * c_max)
        # right shift to zero
        arr = np.abs(min * c_min) + arr
    else:
        arr = np.clip(arr, min, max * c_max)
        # left shift to zero
        arr = arr - min
    # normalization
    norm_fac = np.amax(arr)
    if norm_fac != 0:
        # norm = (arr*255) / norm_fac
        norm = np.divide(np.multiply(arr, 255), np.amax(arr))
    else:  # don't divide through 0
        norm = np.multiply(arr, 255)
    norm = np.clip(np.multiply(norm, 0.00390625), 0, 1)
    return norm


class augmentation:

    ### Core functions
    @staticmethod
    def _get_shift(img, seg, x, y):
        """ Move pixels in a direction by attaching on the other side
        (i.e. x=5 -> 5 pixels to the right; y=-7 -> 7 pixels down).
        :param id: slice id in current volume
        :return: shifted img and seg
        """
        # slide in x direction
        if x != 0:
            img = np.append(img[x:, :], img[:x, :], axis=0)
            seg = np.append(seg[x:, :], seg[:x, :], axis=0)
        # slide in y direction
        if y != 0:
            img = np.append(img[:, -y:], img[:, :-y], axis=1)
            seg = np.append(seg[:, -y:], seg[:, :-y], axis=1)
        return img, seg

    @staticmethod
    def _crop(img, seg, crop_type, frac=0.95):
        height, width = img.shape
        if crop_type == 'lt':
            box = (0, 0,
                   int(round(width * frac)), int(round(height * frac)))
        elif crop_type == 'rt':
            box = (int(round((1.0 - frac) * width)), 0,
                   width, int(round(height * frac)))
        elif crop_type == 'lb':
            box = (0, int(round((1.0 - frac) * height)),
                   int(round(width * frac)), height)
        elif crop_type == 'rb':
            box = (int(round((1.0 - frac) * width)), int(round((1.0 - frac) * height)),
                   width, height)
        elif crop_type == 'c':
            box = (int(round((1.0 - frac) * (width / 2.0))), int(round((1.0 - frac) * (height / 2.0))),
                   int(round(width * (frac + (1 - frac) / 2.0))), int(round(height * (frac + (1 - frac) / 2.0))))
        else:
            raise ValueError("Wrong crop_type. Must be lt, rt, lb, rb or c.")
        # Do the cropping
        x1, y1, x2, y2 = box
        img, seg = img[y1:y2, x1:x2], seg[y1:y2, x1:x2]
        return img, seg

    @staticmethod
    def _rotate(img, angle):
        # Prevent augmentation with no rotation, otherwise the same image will be appended
        if angle == 0:
            angle = 1
        # rotate without interpolation (order=0 makes it take the nearest pixel)
        rotated = scipy.ndimage.interpolation.rotate(img, angle, order=0)
        # rotation results in extra pixels on the borders;
        # we fix it assuming square shape
        assert img.shape[0] == img.shape[1], "Given image for rotation is not of square shape: " + str(img.shape)
        extra = rotated.shape[0] - img.shape[0]
        extra_left = extra / 2
        extra_right = extra - extra_left
        rotated = rotated[extra_left: -extra_right, extra_left: -extra_right]
        return rotated

    ###########################################
    ############ PUBLIC FUNCTIONS #############
    ###########################################

    @staticmethod
    def identity(img, seg):
        """ return original slices """
        return img, seg

    @staticmethod
    def noise(img, seg):
        img_noisy = (img + 0.3 * img.std() * np.random.random(img.shape)).astype(IMG_DTYPE)
        return img_noisy, seg

    @staticmethod
    def get_shift_up(img, seg):
        height = img.shape[0]
        return augmentation._get_shift(img, seg, 0, int(height / 15))

    @staticmethod
    def get_shift_down(img, seg):
        height = img.shape[0]
        return augmentation._get_shift(img, seg, 0, -int(height / 15))

    @staticmethod
    def get_shift_left(img, seg):
        width = img.shape[1]
        return augmentation._get_shift(img, seg, -int(width / 15), 0)

    @staticmethod
    def get_shift_right(img, seg):
        width = img.shape[1]
        return augmentation._get_shift(img, seg, int(width / 15), 0)

    @staticmethod
    def crop_lb(img, seg):
        return augmentation._crop(img, seg, 'lb')

    @staticmethod
    def crop_rt(img, seg):
        return augmentation._crop(img, seg, 'rt')

    @staticmethod
    def crop_c(img, seg):
        return augmentation._crop(img, seg, 'c')

    @staticmethod
    def rotate(img, seg):
        rand = random.randrange(-10, 10)
        return augmentation._rotate(img, rand), augmentation._rotate(seg, rand)


class processors:

    @staticmethod
    def histeq_processor(img, seg):
        """ Histogram equalization """
        nbr_bins = 256
        # get image histogram
        imhist, bins = np.histogram(img.flatten(), nbr_bins, normed=True)
        cdf = imhist.cumsum()  # cumulative distribution function
        cdf = 255 * cdf / cdf[-1]  # normalize
        # use linear interpolation of cdf to find new pixel values
        original_shape = img.shape
        img = np.interp(img.flatten(), bins[:-1], cdf)
        img = img / 255.0
        return img.reshape(original_shape), seg

    @staticmethod
    def plain_UNET_processor(img, seg):
        img = to_scale(img, (388, 388))
        seg = to_scale(seg, (388, 388))
        # Now do padding for UNET, which takes 572x572
        # seg = np.pad(seg, ((92, 92), (92, 92)), mode='reflect')
        img = np.pad(img, 92, mode='reflect')
        return img, seg

    @staticmethod
    def liveronly_label_processor(img, seg):
        """ Converts lesion labels to the liver label. The resulting classifier
        classifies liver vs. background. """
        seg[seg == 2] = 1
        return img, seg

    @staticmethod
    def remove_non_liver(img, seg):
        # Remove background!
        img = np.multiply(img, np.clip(seg, 0, 1))
        return img, seg

    @staticmethod
    def zoomliver_UNET_processor(img, seg):
        """ Custom preprocessing of img, seg for the UNET architecture:
        crops the background and upsamples the found patch. """
        # get patch size
        col_maxes = np.max(seg, axis=0)  # a row
        row_maxes = np.max(seg, axis=1)  # a column
        nonzero_colmaxes = np.nonzero(col_maxes)[0]
        nonzero_rowmaxes = np.nonzero(row_maxes)[0]
        x1, x2 = nonzero_colmaxes[0], nonzero_colmaxes[-1]
        y1, y2 = nonzero_rowmaxes[0], nonzero_rowmaxes[-1]
        width = x2 - x1
        height = y2 - y1
        MIN_WIDTH = 60
        MIN_HEIGHT = 60
        x_pad = int((MIN_WIDTH - width) / 2.0 if width < MIN_WIDTH else 0)
        y_pad = int((MIN_HEIGHT - height) / 2.0 if height < MIN_HEIGHT else 0)
        # Additional padding to make sure boundary lesions are included
        # SAFETY_PAD = 15
        # x_pad += SAFETY_PAD
        # y_pad += SAFETY_PAD
        x1 = max(0, x1 - x_pad)
        x2 = min(img.shape[1], x2 + x_pad)
        y1 = max(0, y1 - y_pad)
        y2 = min(img.shape[0], y2 + y_pad)
        img = img[y1:y2 + 1, x1:x2 + 1]
        seg = seg[y1:y2 + 1, x1:x2 + 1]
        img = to_scale(img, (388, 388))
        seg = to_scale(seg, (388, 388))
        # All non-lesion is background
        seg[seg == 1] = 0
        # Lesion label becomes 1
        seg[seg == 2] = 1
        # Now do padding for UNET, which takes 572x572
        # seg = np.pad(seg, ((92, 92), (92, 92)), mode='reflect')
        img = np.pad(img, 92, mode='reflect')
        return img, seg


import config


class NumpyDataLayer(caffe.Layer):
    """ Caffe data layer that reads directly from npy files """

    def setup(self, bottom, top):
        print "Setup NumpyDataLayer"
        self.top_names = ['data', 'label']
        self.batch_size = 1  # batch_size > 1 is not implemented, but would be simple to add in forward()
        self.img_volumes = []  # list of numpy volumes
        self.seg_volumes = []  # list of numpy label volumes
        self.n_volumes = 0  # number of volumes in dataset
        self.n_augmentations = config.augmentation_factor  # number of possible augmentations
        self.queue = Queue(MAX_QUEUE_SIZE)
        self.n_total_slices = 0

        for vol_id, img_path, seg_path in self.dataset:
            # shape initially is like 512,512,129
            imgvol = np.load(img_path, mmap_mode='r')
            imgvol = np.rot90(imgvol)  # rotate so that the spine is down, not left
            imgvol = np.transpose(imgvol, (2, 0, 1))  # bring slice index to first place
            self.img_volumes.append(imgvol)

            segvol = np.load(seg_path, mmap_mode='r')
            segvol = np.rot90(segvol)
            segvol = np.transpose(segvol, (2, 0, 1))
            self.seg_volumes.append(segvol)

            assert imgvol.shape == segvol.shape, \
                "Volume and segmentation have different shapes: %s vs. %s" % (str(imgvol.shape), str(segvol.shape))

            self.n_volumes += 1
            self.n_total_slices += segvol.shape[0]
        print "Dataset has ", self.n_total_slices, "(before augmentation)"

        top[0].reshape(1, 1, 572, 572)
        top[1].reshape(1, 1, 388, 388)

        # Seed the random generator
        np.random.seed(123)
        # Put first input into the queue. child_seed is a randomly generated
        # seed; it is needed because without it, every newly created process
        # would be identical and would generate the same sequence of random numbers.
        child_seed = np.random.randint(0, 9000)
        self.p = Process(target=self.prepare_next_batch, args=(child_seed,))
        self.p.start()

        import atexit

        def cleanup():
            print "Terminating dangling process"
            self.p.terminate()
            self.p.join()

        atexit.register(cleanup)
        import signal
        signal.signal(signal.SIGINT, cleanup)

    def reshape(self, bottom, top):
        pass

    def forward(self, bottom, top):
        while True:
            try:
                img, seg = self.queue.get(timeout=1)
                break
            except Empty:
                # If the queue is empty for any reason, we must prefetch now.
                # Make sure that there is no self.p currently running
                if not self.p.is_alive():
                    # be 100% sure to terminate self.p
                    self.p.join()
                    print "forward(): Queue was empty. Spawning prefetcher and retrying"
                    child_seed = np.random.randint(0, 9000)
                    self.p = Process(target=self.prepare_next_batch, args=(child_seed,))
                    self.p.start()
        top[0].data[0, ...] = img
        top[1].data[0, ...] = seg

    def backward(self, top, propagate_down, bottom):
        pass

    def prepare_next_batch(self, seed):
        np.random.seed(seed)
        for _ in range(PREFETCH_BATCH_SIZE):
            self.get_next_slice()

    def get_next_slice(self):
        """ Randomly pick a next slice and push it to the shared queue """
        while True:
            # Pick a random slice and augmentation. Done this way, each volume
            # has equal probability of being selected regardless of how many
            # slices it has, and each slice inside a volume has equal chances
            # to be picked. Globally, however, not every slice has the same
            # probability of being selected: it depends on how many other
            # slices in the same volume are competing with it.
            vol_idx = np.random.randint(0, self.n_volumes)
            slice_idx = np.random.randint(0, self.img_volumes[vol_idx].shape[0])
            aug_idx = np.random.randint(0, self.n_augmentations)
            img = self.img_volumes[vol_idx][slice_idx]
            seg = self.seg_volumes[vol_idx][slice_idx]
            # print vol_idx, slice_idx, aug_idx
            # Only break if we found a relevant slice
            if self.is_relevant_slice(seg):
                break
        img, seg = self.prepare_slice(img, seg, aug_idx)
        try:
            self.queue.put((img, seg))
        except Full:
            pass

    def is_relevant_slice(self, slc):
        """ Checks whether a given segmentation slice is relevant, according to
        the rule specified in config.select_slices (e.g., lesion-only) """
        # We increase small livers by rejecting non-small liver slices more frequently
        if config.more_small_livers:
            n_liver = 1.0 * np.sum(slc > 0)
            if (100 * n_liver / slc.size) > config.small_liver_percent:  # NOT a small liver
                return maybe_true(0.7)
        if config.select_slices == "all":
            # Reject most of the slices that have no liver/lesion
            if np.count_nonzero(slc) == 0:
                return maybe_true(0.3)
            return True
        max = np.max(slc)
        if config.select_slices == "liver-lesion":
            return max == 1 or max == 2
        elif config.select_slices == "lesion-only":
            return max == 2
        elif config.select_slices == "liver-only":
            return max == 1
        else:
            raise ValueError("Invalid value for config.select_slices:", config.select_slices)

    def prepare_slice(self, img, seg, aug_idx):
        # Make sure 0 <= label <= 2
        seg = np.clip(seg, 0, 2)
        img = norm_hounsfield_dyn(img)
        img, seg = self.augment_slice(img, seg, aug_idx)
        for processor in config.processors_list:
            img, seg = processor(img, seg)
        # img = to_scale(img, (400, 400))
        # seg = to_scale(seg, (400, 400))
        return img, seg

    def augment_slice(self, img, seg, aug_idx):
        aug_func = [augmentation.identity,
                    augmentation.crop_lb,
                    augmentation.crop_rt,
                    augmentation.crop_c,
                    augmentation.rotate,
                    augmentation.rotate,
                    augmentation.get_shift_up,
                    augmentation.get_shift_down,
                    augmentation.get_shift_left,
                    augmentation.get_shift_right]
        # augmentation.noise
        # Invoke the selected augmentation function
        img, seg = aug_func[aug_idx](img, seg)
        return img, seg


class NumpyTrainDataLayer(NumpyDataLayer):
    """ NumpyDataLayer for the train dataset """

    def setup(self, bottom, top):
        self.dataset = config.train_dataset
        print 'Training size:', len(self.dataset)
        super(NumpyTrainDataLayer, self).setup(bottom, top)


class NumpyTestDataLayer(NumpyDataLayer):
    """ NumpyDataLayer for the test dataset """

    def setup(self, bottom, top):
        self.dataset = config.test_dataset
        print 'Test size:', len(self.dataset)
        super(NumpyTestDataLayer, self).setup(bottom, top)
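The module itself contains no net definition. As a usage sketch (names assumed from the module and class names above, not taken from a prototxt in this post), the train layer would be referenced from a Caffe net definition roughly like this, which requires Caffe built with WITH_PYTHON_LAYER=1:

layer {
  name: "data"
  type: "Python"
  top: "data"
  top: "label"
  python_param {
    module: "numpy_data_layer"
    layer: "NumpyTrainDataLayer"
  }
  include { phase: TRAIN }
}

The two tops match the layer's top_names and the reshape calls in setup(): data is 1x1x572x572 (the padded UNET input) and label is 1x1x388x388.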

Validation 1

import logging

# Number of CPUs used for parallel processing
N_PROC = 14

# Image/Seg shape
slice_shape = (388, 388)

#################
#### OUTPUT #####
#################
ct_window_type = 'stat'
ct_window_type_min = -100
ct_window_type_max = 200

# Logging level
log_level = logging.INFO
output_dir = "/home/guest/output/"
logfile = 'output.txt'

# Save liver.npy and lesion.npy volumes to output_dir/[niftiname].liver.npy, with shape h,w,slices,classes
save_probability_volumes = True

# Save slices as png files. This param is the increment between plotting one slice and the next.
# Set 0 or -1 to disable plotting
plot_every_n_slices = -1

test_set = [
(82, '/home/guest/training/volume-82.npy', '/home/guest/training/segmentation-82.npy') ,
(74, '/home/guest/training/volume-74.npy', '/home/guest/training/segmentation-74.npy') ,
(125, '/home/guest/training/volume-125.npy', '/home/guest/training/segmentation-125.npy') ,
(11, '/home/guest/training/volume-11.npy', '/home/guest/training/segmentation-11.npy') ,
(89, '/home/guest/training/volume-89.npy', '/home/guest/training/segmentation-89.npy') ,
(78, '/home/guest/training/volume-78.npy', '/home/guest/training/segmentation-78.npy') ,
(64, '/home/guest/training/volume-64.npy', '/home/guest/training/segmentation-64.npy') ,
(126, '/home/guest/training/volume-126.npy', '/home/guest/training/segmentation-126.npy') ,
(129, '/home/guest/training/volume-129.npy', '/home/guest/training/segmentation-129.npy') ,
(114, '/home/guest/training/volume-114.npy', '/home/guest/training/segmentation-114.npy') ,
(37, '/home/guest/training/volume-37.npy', '/home/guest/training/segmentation-37.npy') ,
(25, '/home/guest/training/volume-25.npy', '/home/guest/training/segmentation-25.npy') ,
(85, '/home/guest/training/volume-85.npy', '/home/guest/training/segmentation-85.npy') ,
(80, '/home/guest/training/volume-80.npy', '/home/guest/training/segmentation-80.npy') ,
(27, '/home/guest/training/volume-27.npy', '/home/guest/training/segmentation-27.npy') ,
(18, '/home/guest/training/volume-18.npy', '/home/guest/training/segmentation-18.npy') ,
(69, '/home/guest/training/volume-69.npy', '/home/guest/training/segmentation-69.npy') ,
(40, '/home/guest/training/volume-40.npy', '/home/guest/training/segmentation-40.npy') ,
(61, '/home/guest/training/volume-61.npy', '/home/guest/training/segmentation-61.npy') ,
(117, '/home/guest/training/volume-117.npy', '/home/guest/training/segmentation-117.npy') ,
(44, '/home/guest/training/volume-44.npy', '/home/guest/training/segmentation-44.npy') ,
(26, '/home/guest/training/volume-26.npy', '/home/guest/training/segmentation-26.npy') ,
(91, '/home/guest/training/volume-91.npy', '/home/guest/training/segmentation-91.npy') ,
(65, '/home/guest/training/volume-65.npy', '/home/guest/training/segmentation-65.npy') ,
(55, '/home/guest/training/volume-55.npy', '/home/guest/training/segmentation-55.npy') ,
(5, '/home/guest/training/volume-5.npy', '/home/guest/training/segmentation-5.npy') ,
(77, '/home/guest/training/volume-77.npy', '/home/guest/training/segmentation-77.npy') ,
(12, '/home/guest/training/volume-12.npy', '/home/guest/training/segmentation-12.npy') ,
(28, '/home/guest/training/volume-28.npy', '/home/guest/training/segmentation-28.npy') ,
(6, '/home/guest/training/volume-6.npy', '/home/guest/training/segmentation-6.npy') ,
(79, '/home/guest/training/volume-79.npy', '/home/guest/training/segmentation-79.npy') ,
(84, '/home/guest/training/volume-84.npy', '/home/guest/training/segmentation-84.npy') ,
(103, '/home/guest/training/volume-103.npy', '/home/guest/training/segmentation-103.npy') ,
(101, '/home/guest/training/volume-101.npy', '/home/guest/training/segmentation-101.npy') ,
(106, '/home/guest/training/volume-106.npy', '/home/guest/training/segmentation-106.npy') ,
(59, '/home/guest/training/volume-59.npy', '/home/guest/training/segmentation-59.npy') ,
(45, '/home/guest/training/volume-45.npy', '/home/guest/training/segmentation-45.npy') ,
(53, '/home/guest/training/volume-53.npy', '/home/guest/training/segmentation-53.npy') ,
(41, '/home/guest/training/volume-41.npy', '/home/guest/training/segmentation-41.npy') ,
(121, '/home/guest/training/volume-121.npy', '/home/guest/training/segmentation-121.npy')]

dataset = [test_set]
models = ['/home/guest/step1/_iter_77000.caffemodel']
models_step_two = ['/home/guest/step2/_iter_60000.caffemodel']
deployprototxt = ['/home/guest/deploy.prototxt']
deployprototxt_step_two = ['/home/guest/deploy.prototxt']
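With save_probability_volumes enabled, each test volume leaves a [niftiname].liver.npy (and, after cascade step 2, a .lesion.npy) file in output_dir with shape (h, w, slices, classes). A minimal sketch of reloading one of them and deriving a hard segmentation; the file name here is illustrative:

import numpy as np

probvol = np.load('/home/guest/output/volume-82.npy.liver.npy')  # shape (388, 388, n_slices, 2)
pred = probvol.argmax(axis=3)             # hard labels: the class with the highest probability
print pred.shape, np.count_nonzero(pred)  # slice grid and a rough size of the predicted liver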

Validation 2

name: "phseg_v5"
force_backward: true
input: "data"
input_dim: 1
input_dim: 1
input_dim: 572
input_dim: 572
layer {name: "conv_d0a-b" type: "Convolution" bottom: "data" top: "d0b" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 64 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d0b" type: "BatchNorm" bottom: "d0b" top: "d0b"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d0b" type: "ReLU" bottom: "d0b" top: "d0b"
}
layer {name: "conv_d0b-c" type: "Convolution" bottom: "d0b" top: "d0c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 64 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d0c" type: "BatchNorm" bottom: "d0c" top: "d0c"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d0c" type: "ReLU" bottom: "d0c" top: "d0c"
}
layer {name: "pool_d0c-1a" type: "Pooling" bottom: "d0c" top: "d1a" pooling_param {pool: MAX kernel_size: 2 stride: 2}
}
layer {name: "conv_d1a-b" type: "Convolution" bottom: "d1a" top: "d1b" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 128 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d1b" type: "BatchNorm" bottom: "d1b" top: "d1b"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d1b" type: "ReLU" bottom: "d1b" top: "d1b"
}
layer {name: "conv_d1b-c" type: "Convolution" bottom: "d1b" top: "d1c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 128 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d1c" type: "BatchNorm" bottom: "d1c" top: "d1c"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d1c" type: "ReLU" bottom: "d1c" top: "d1c"
}
layer {name: "pool_d1c-2a" type: "Pooling" bottom: "d1c" top: "d2a" pooling_param {pool: MAX kernel_size: 2 stride: 2}
}
layer {name: "conv_d2a-b" type: "Convolution" bottom: "d2a" top: "d2b" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 256 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d2b" type: "BatchNorm" bottom: "d2b" top: "d2b"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d2b" type: "ReLU" bottom: "d2b" top: "d2b"
}
layer {name: "conv_d2b-c" type: "Convolution" bottom: "d2b" top: "d2c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 256 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d2c" type: "BatchNorm" bottom: "d2c" top: "d2c"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d2c" type: "ReLU" bottom: "d2c" top: "d2c"
}
layer {name: "pool_d2c-3a" type: "Pooling" bottom: "d2c" top: "d3a" pooling_param {pool: MAX kernel_size: 2 stride: 2}
}
layer {name: "conv_d3a-b" type: "Convolution" bottom: "d3a" top: "d3b" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 512 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d3b" type: "BatchNorm" bottom: "d3b" top: "d3b"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d3b" type: "ReLU" bottom: "d3b" top: "d3b"
}
layer {name: "conv_d3b-c" type: "Convolution" bottom: "d3b" top: "d3c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 512 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d3c" type: "BatchNorm" bottom: "d3c" top: "d3c"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d3c" type: "ReLU" bottom: "d3c" top: "d3c"
}
layer {name: "dropout_d3c" type: "Dropout" bottom: "d3c" top: "d3c" include {phase: TRAIN} dropout_param {dropout_ratio: 0.5}
}
layer {name: "pool_d3c-4a" type: "Pooling" bottom: "d3c" top: "d4a" pooling_param {pool: MAX kernel_size: 2 stride: 2}
}
layer {name: "conv_d4a-b" type: "Convolution" bottom: "d4a" top: "d4b" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 1024 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d4b" type: "BatchNorm" bottom: "d4b" top: "d4b"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d4b" type: "ReLU" bottom: "d4b" top: "d4b"
}
layer {name: "conv_d4b-c" type: "Convolution" bottom: "d4b" top: "d4c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 1024 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
#layer { name: "bn_d4c" type: "BatchNorm" bottom: "d4c" top: "d4c"
#  param {lr_mult: 0} param {lr_mult: 0} param {lr_mult: 0}}
layer {name: "relu_d4c" type: "ReLU" bottom: "d4c" top: "d4c"
}
layer {name: "dropout_d4c" type: "Dropout" bottom: "d4c" top: "d4c" include {phase: TRAIN} dropout_param {dropout_ratio: 0.5}
}
layer {name: "upconv_d4c_u3a" type: "Deconvolution" bottom: "d4c" top: "u3a" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 512 pad: 0 kernel_size: 2 stride: 2 weight_filler {type: "xavier"}}
}
layer {name: "relu_u3a" type: "ReLU" bottom: "u3a" top: "u3a"
}
layer {name: "crop_d3c-d3cc" type: "Crop" bottom: "d3c" bottom: "u3a" top: "d3cc"}
layer {name: "concat_d3cc_u3a-b" type: "Concat" bottom: "u3a" bottom: "d3cc" top: "u3b"
}
layer {name: "conv_u3b-c" type: "Convolution" bottom: "u3b" top: "u3c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 512 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "relu_u3c" type: "ReLU" bottom: "u3c" top: "u3c"
}
layer {name: "conv_u3c-d" type: "Convolution" bottom: "u3c" top: "u3d" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 512 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "relu_u3d" type: "ReLU" bottom: "u3d" top: "u3d"
}
layer {name: "upconv_u3d_u2a" type: "Deconvolution" bottom: "u3d" top: "u2a" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 256 pad: 0 kernel_size: 2 stride: 2 weight_filler {type: "xavier"}}
}
layer {name: "relu_u2a" type: "ReLU" bottom: "u2a" top: "u2a"
}
layer {name: "crop_d2c-d2cc" type: "Crop" bottom: "d2c" bottom: "u2a" top: "d2cc"}
layer {name: "concat_d2cc_u2a-b" type: "Concat" bottom: "u2a" bottom: "d2cc" top: "u2b"
}
layer {name: "conv_u2b-c" type: "Convolution" bottom: "u2b" top: "u2c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 256 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "relu_u2c" type: "ReLU" bottom: "u2c" top: "u2c"
}
layer {name: "conv_u2c-d" type: "Convolution" bottom: "u2c" top: "u2d" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 256 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "relu_u2d" type: "ReLU" bottom: "u2d" top: "u2d"
}
layer {name: "upconv_u2d_u1a" type: "Deconvolution" bottom: "u2d" top: "u1a" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 128 pad: 0 kernel_size: 2 stride: 2 weight_filler {type: "xavier"}}
}
layer {name: "relu_u1a" type: "ReLU" bottom: "u1a" top: "u1a"
}
layer {name: "crop_d1c-d1cc" type: "Crop" bottom: "d1c" bottom: "u1a" top: "d1cc"}
layer {name: "concat_d1cc_u1a-b" type: "Concat" bottom: "u1a" bottom: "d1cc" top: "u1b"
}
layer {name: "conv_u1b-c" type: "Convolution" bottom: "u1b" top: "u1c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 128 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "relu_u1c" type: "ReLU" bottom: "u1c" top: "u1c"
}
layer {name: "conv_u1c-d" type: "Convolution" bottom: "u1c" top: "u1d" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 128 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "relu_u1d" type: "ReLU" bottom: "u1d" top: "u1d"
}
layer {name: "upconv_u1d_u0a_NEW" type: "Deconvolution" bottom: "u1d" top: "u0a" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 64 pad: 0 kernel_size: 2 stride: 2 weight_filler {type: "xavier"}}
}
layer {name: "relu_u0a" type: "ReLU" bottom: "u0a" top: "u0a"
}
layer {name: "crop_d0c-d0cc" type: "Crop" bottom: "d0c" bottom: "u0a" top: "d0cc"}
layer {name: "concat_d0cc_u0a-b" type: "Concat" bottom: "u0a" bottom: "d0cc" top: "u0b"
}
layer {name: "conv_u0b-c_New" type: "Convolution" bottom: "u0b" top: "u0c" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 64 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "relu_u0c" type: "ReLU" bottom: "u0c" top: "u0c"
}
layer {name: "conv_u0c-d_New" type: "Convolution" bottom: "u0c" top: "u0d" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 64 pad: 0 kernel_size: 3 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "relu_u0d" type: "ReLU" bottom: "u0d" top: "u0d"
}
layer {name: "conv_u0d-score_New" type: "Convolution" bottom: "u0d" top: "score" param {lr_mult: 1 decay_mult: 1} param {lr_mult: 2 decay_mult: 0} convolution_param {num_output: 2 pad: 0 kernel_size: 1 weight_filler {type: "xavier"} engine: CAFFE}
}
layer {name: "prob" type: "Softmax" bottom: "score" top: "prob" include {phase: TEST}
}
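This deploy network takes 572x572 inputs and produces 388x388 score maps, which matches slice_shape in the configs above: each unpadded 3x3 convolution trims 2 pixels, each 2x2 max pooling halves the size, and each 2x2 deconvolution doubles it. A small sketch of that arithmetic for the layer sequence above:

def unet_output_size(n=572):
    # trace the spatial size through the valid-convolution U-Net above
    conv = lambda s: s - 2  # one unpadded 3x3 convolution
    for _ in range(4):      # contracting path d0..d3: two convs, then 2x2 pooling
        n = conv(conv(n)) // 2
    n = conv(conv(n))       # bottleneck d4
    for _ in range(4):      # expanding path u3..u0: 2x2 deconvolution, then two convs
        n = conv(conv(n * 2))
    return n

print unet_output_size(572)  # -> 388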

Validation 3

"""
@package medpy.metric.surface
Holds a metrics class computing surface metrics over two 3D images, each containing a binary object.
Classes:
    - Surface: Computes different surface metrics between two 3D images, each containing an object.
@author Oskar Maier
@version r0.4.1
@since 2011-12-01
@status Release
"""

# built-in modules
import math

# third-party modules
import scipy.spatial
import scipy.ndimage.morphology

# own modules

# code
class Surface(object):
    """
    Computes different surface metrics between two 3D images, each containing an object.
    The surface of the objects is computed using an 18-neighbourhood edge detection.
    The distance metrics are computed over all points of the surfaces using the nearest
    neighbour approach. Besides this, it provides a number of statistics of the two images.

    During the initialization the edge detection is run for both images, taking up to
    5 min (on 512^3 images). The first call to one of the metric measures triggers the
    computation of the nearest neighbours, taking up to 7 minutes (based on 250,000 edge
    points for each of the objects, which corresponds to a typical liver mask). All
    subsequent calls to one of the metric measures can be expected to be in the
    sub-millisecond range.

    Metrics defined in:
    Heimann, T.; van Ginneken, B.; Styner, M.A.; Arzhaeva, Y.; Aurich, V.; Bauer, C.; Beck, A.;
    Becker, C.; Beichel, R.; Bekes, G.; Bello, F.; Binnig, G.; Bischof, H.; Bornik, A.;
    Cashman, P.; Ying Chi; Cordova, A.; Dawant, B.M.; Fidrich, M.; Furst, J.D.; Furukawa, D.;
    Grenacher, L.; Hornegger, J.; Kainmuller, D.; Kitney, R.I.; Kobatake, H.; Lamecker, H.;
    Lange, T.; Jeongjin Lee; Lennon, B.; Rui Li; Senhu Li; Meinzer, H.-P.; Nemeth, G.;
    Raicu, D.S.; Rau, A.-M.; van Rikxoort, E.M.; Rousson, M.; Rusko, L.; Saddi, K.A.;
    Schmidt, G.; Seghers, D.; Shimizu, A.; Slagmolen, P.; Sorantin, E.; Soza, G.;
    Susomboon, R.; Waite, J.M.; Wimmer, A.; Wolf, I.,
    "Comparison and Evaluation of Methods for Liver Segmentation From CT Datasets,"
    Medical Imaging, IEEE Transactions on, vol.28, no.8, pp.1251-1265, Aug. 2009
    doi: 10.1109/TMI.2009.2013851
    """
    # The edge points of the mask object.
    __mask_edge_points = None
    # The edge points of the reference object.
    __reference_edge_points = None
    # The nearest neighbour distances between mask and reference edge points.
    __mask_reference_nn = None
    # The nearest neighbour distances between reference and mask edge points.
    __reference_mask_nn = None
    # Distances of the two objects' surface points.
    __distance_matrix = None

    def __init__(self, mask, reference, physical_voxel_spacing=[1, 1, 1], mask_offset=[0, 0, 0], reference_offset=[0, 0, 0]):
        """
        Initialize the class with two binary images, each containing a single object.
        Assumes the input to be a representation of a 3D image that fits one of the
        following formats:
        - 1. all 0 values denoting background, all others the foreground/object
        - 2. all False values denoting the background, all others the foreground/object
        The first image passed is referred to as 'mask', the second as 'reference'. This
        is only important for some metrics that are not symmetric (and therefore not
        really metrics).
        @param mask binary mask as a scipy array (3D image)
        @param reference binary reference as a scipy array (3D image)
        @param physical_voxel_spacing The physical voxel spacing of the two images
            (must be the same for both)
        @param mask_offset offset of the mask array to the 0,0,0-origin
        @param reference_offset offset of the reference array to the 0,0,0-origin
        """
        # compute edge images
        mask_edge_image = Surface.compute_contour(mask)
        reference_edge_image = Surface.compute_contour(reference)

        # collect the object edge voxel positions
        # !TODO: When the distance matrix is already calculated here,
        # these points don't have to be actually stored, only their number.
        # But there might be some later metric implementation that requires the
        # points, and then it would be good to have them. What is better?
        mask_pts = mask_edge_image.nonzero()
        mask_edge_points = scipy.array(zip(mask_pts[0], mask_pts[1], mask_pts[2]))  # as array, so offset/spacing broadcast below
        reference_pts = reference_edge_image.nonzero()
        reference_edge_points = scipy.array(zip(reference_pts[0], reference_pts[1], reference_pts[2]))

        # check if there is actually an object present
        if 0 >= len(mask_edge_points):
            raise Exception('The mask image does not seem to contain an object.')
        if 0 >= len(reference_edge_points):
            raise Exception('The reference image does not seem to contain an object.')

        # add offsets to the voxel positions and multiply with the physical voxel spacing
        # to get the real positions in millimeters
        physical_voxel_spacing = scipy.array(physical_voxel_spacing)
        mask_edge_points = (mask_edge_points + scipy.array(mask_offset)) * physical_voxel_spacing
        reference_edge_points = (reference_edge_points + scipy.array(reference_offset)) * physical_voxel_spacing

        # set member vars
        self.__mask_edge_points = mask_edge_points
        self.__reference_edge_points = reference_edge_points

    def get_maximum_symmetric_surface_distance(self):
        """
        Computes the maximum symmetric surface distance, also known as Hausdorff
        distance, between the two objects' surfaces.

        @return the maximum symmetric surface distance in millimeters

        For a perfect segmentation this distance is 0. This metric is sensitive to
        outliers and returns the true maximum error.

        Metric definition:
        Let \f$S(A)\f$ denote the set of surface voxels of \f$A\f$. The shortest
        distance of an arbitrary voxel \f$v\f$ to \f$S(A)\f$ is defined as:
        \f[
            d(v,S(A)) = \min_{s_A \in S(A)} ||v - s_A||
        \f]
        where \f$||.||\f$ denotes the Euclidean distance. The maximum symmetric
        surface distance is then given by:
        \f[
            MSD(A,B) = \max\left\{ \max_{s_A \in S(A)} d(s_A,S(B)), \max_{s_B \in S(B)} d(s_B,S(A)) \right\}
        \f]
        """
        # Get the maximum of the nearest neighbour distances
        A_B_distance = self.get_mask_reference_nn().max()
        B_A_distance = self.get_reference_mask_nn().max()
        # compute result and return
        return max(A_B_distance, B_A_distance)

    def get_root_mean_square_symmetric_surface_distance(self):
        """
        Computes the root mean square symmetric surface distance between the
        two objects' surfaces.

        @return root mean square symmetric surface distance in millimeters

        For a perfect segmentation this distance is 0. This metric punishes large
        deviations from the true contour more strongly than the average symmetric
        surface distance.

        Metric definition:
        Let \f$S(A)\f$ denote the set of surface voxels of \f$A\f$. The shortest
        distance of an arbitrary voxel \f$v\f$ to \f$S(A)\f$ is defined as:
        \f[
            d(v,S(A)) = \min_{s_A \in S(A)} ||v - s_A||
        \f]
        where \f$||.||\f$ denotes the Euclidean distance. The root mean square
        symmetric surface distance is then given by:
        \f[
            RMSD(A,B) = \sqrt{\frac{1}{|S(A)|+|S(B)|}} \times \sqrt{\sum_{s_A \in S(A)} d^2(s_A,S(B)) + \sum_{s_B \in S(B)} d^2(s_B,S(A))}
        \f]
        """
        # get object sizes
        mask_surface_size = len(self.get_mask_edge_points())
        reference_surface_size = len(self.get_reference_edge_points())
        # get minimal nearest neighbour distances
        A_B_distances = self.get_mask_reference_nn()
        B_A_distances = self.get_reference_mask_nn()
        # square the distances
        A_B_distances_squared = A_B_distances * A_B_distances
        B_A_distances_squared = B_A_distances * B_A_distances
        # sum the squared distances
        A_B_distances_sum = A_B_distances_squared.sum()
        B_A_distances_sum = B_A_distances_squared.sum()
        # compute result and return
        return math.sqrt(1. / (mask_surface_size + reference_surface_size)) * math.sqrt(A_B_distances_sum + B_A_distances_sum)

    def get_average_symmetric_surface_distance(self):
        """
        Computes the average symmetric surface distance between the
        two objects' surfaces.

        @return average symmetric surface distance in millimeters

        For a perfect segmentation this distance is 0.

        Metric definition:
        Let \f$S(A)\f$ denote the set of surface voxels of \f$A\f$. The shortest
        distance of an arbitrary voxel \f$v\f$ to \f$S(A)\f$ is defined as:
        \f[
            d(v,S(A)) = \min_{s_A \in S(A)} ||v - s_A||
        \f]
        where \f$||.||\f$ denotes the Euclidean distance. The average symmetric
        surface distance is then given by:
        \f[
            ASD(A,B) = \frac{1}{|S(A)|+|S(B)|} \left( \sum_{s_A \in S(A)} d(s_A,S(B)) + \sum_{s_B \in S(B)} d(s_B,S(A)) \right)
        \f]
        """
        # get object sizes
        mask_surface_size = len(self.get_mask_edge_points())
        reference_surface_size = len(self.get_reference_edge_points())
        # get minimal nearest neighbour distances
        A_B_distances = self.get_mask_reference_nn()
        B_A_distances = self.get_reference_mask_nn()
        # sum the minimal distances
        A_B_distances_sum = A_B_distances.sum()
        B_A_distances_sum = B_A_distances.sum()
        # compute result and return
        return 1. / (mask_surface_size + reference_surface_size) * (A_B_distances_sum + B_A_distances_sum)

    def get_mask_reference_nn(self):
        """
        @return The distances of the nearest neighbours of all mask edge points to all
                reference edge points.
        """
        # Note: see note for @see get_reference_mask_nn
        if self.__mask_reference_nn is None:
            tree = scipy.spatial.cKDTree(self.get_mask_edge_points())
            self.__mask_reference_nn, _ = tree.query(self.get_reference_edge_points())
        return self.__mask_reference_nn

    def get_reference_mask_nn(self):
        """
        @return The distances of the nearest neighbours of all reference edge points
                to all mask edge points.

        The underlying algorithm used for the scipy.spatial.KDTree implementation is
        based on:
        Sunil Arya, David M. Mount, Nathan S. Netanyahu, Ruth Silverman, and
        Angela Y. Wu. 1998. An optimal algorithm for approximate nearest neighbor
        searching fixed dimensions. J. ACM 45, 6 (November 1998), 891-923
        """
        # Note: KDTree is faster than scipy.spatial.distance.cdist when the number of
        # voxels exceeds 10,000 (computationally tested). The maximum complexity is
        # O(D*N^2) vs. O(D*N*log(N)), where D=3 and N=number of voxels
        if self.__reference_mask_nn is None:
            tree = scipy.spatial.cKDTree(self.get_reference_edge_points())
            self.__reference_mask_nn, _ = tree.query(self.get_mask_edge_points())
        return self.__reference_mask_nn

    def get_mask_edge_points(self):
        """ @return The edge points of the mask object. """
        return self.__mask_edge_points

    def get_reference_edge_points(self):
        """ @return The edge points of the reference object. """
        return self.__reference_edge_points

    @staticmethod
    def compute_contour(array):
        """
        Uses an 18-neighbourhood filter to create an edge image of the input object.
        Assumes the input to be a representation of a 3D image that fits one of the
        following formats:
        - 1. all 0 values denoting background, all others the foreground/object
        - 2. all False values denoting the background, all others the foreground/object
        The area outside the array is assumed to contain background voxels. The method
        does not ensure that the object voxels are actually connected; this is silently
        assumed.

        @param array a numpy array with only 0/N\{0} or False/True values
        @return a boolean numpy array with the input object's edges
        """
        # set the 18-neighbourhood/connectivity (for 3D images) alias face-and-edge kernel;
        # all values covered by 1/True are passed to the function
        # as a 1D array in order left-right, top-down
        # Note: all in all 19 ones, as the center value
        # also has to be checked (if it is a masked pixel)
        # [[[0, 1, 0], [[1, 1, 1],  [[0, 1, 0],
        #   [1, 1, 1],  [1, 1, 1],   [1, 1, 1],
        #   [0, 1, 0]], [1, 1, 1]],  [0, 1, 0]]]
        footprint = scipy.ndimage.morphology.generate_binary_structure(3, 2)
        # create an eroded version of the array
        erode_array = scipy.ndimage.morphology.binary_erosion(array, footprint)
        # xor the eroded array with the original and return
        return array ^ erode_array
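A minimal usage sketch of the class, mirroring the (commented-out) call in the validation script's scorer. The two toy binary volumes stand in for prediction and ground truth, and the voxel spacing would normally come from the NIfTI header:

import numpy as np

pred = np.zeros((32, 32, 32), dtype=np.uint8)
pred[8:20, 8:20, 8:20] = 1
label = np.zeros((32, 32, 32), dtype=np.uint8)
label[10:22, 10:22, 10:22] = 1

evalsurf = Surface(pred, label, physical_voxel_spacing=[1.0, 1.0, 1.5])
print "ASSD:", evalsurf.get_average_symmetric_surface_distance()
print "MSD :", evalsurf.get_maximum_symmetric_surface_distance()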

Validation 4

'''
Contains common functions for reading data out of leveldb
@author: Mohamed.Ezz
'''
import plyvel, lmdb
import numpy as np
from caffe.proto import caffe_pb2
IMG_DTYPE = np.float
SEG_DTYPE = np.uint8


def denormalize_img_255(arr):
    """ Denormalizes an nparray to 0-255 values """
    arr_min = arr.min()
    arr_max = arr.max()
    new = (arr - arr_min) * (255.0 / (arr_max - arr_min))
    return new.astype(np.uint8)


def leveldb_arrays(leveldbdir):
    """ Generator. Given a leveldb directory, iterate the stored data as numpy arrays. Yields (Key, NumpyArray) """
    db = CaffeDatabase(leveldbdir)
    for k, v in db.iterator():
        yield k, to_numpy_matrix(v)


def nth_datum(caffedb, n):
    """ Returns the nth datum. 0-based index """
    n += 1
    it = caffedb.iterator()
    for _ in range(n):
        _, v = it.next()
    datum = caffe_pb2.Datum()
    datum.ParseFromString(v)
    return datum


def get_data_type(datum):
    """ By simple calculations, conclude the size of the integers stored in datum.data """
    n_values = datum.height * datum.width * datum.channels
    n_bytes = len(datum.data)
    int_size = float(n_bytes) / n_values
    if int_size != int(int_size) or int_size not in [1, 2, 4, 8]:
        raise ValueError("Can't find int size. n_values : %i , n_bytes : %i" % (n_values, n_bytes))
    types = {1: np.int8, 2: np.int16, 4: np.int32, 8: np.int64}
    type_ = types[int(int_size)]
    return type_


def find_keycount(caffedb, count_values=None):
    """ Takes a CaffeDatabase or plyvel.DB instance and returns the number of keys found and the count of each value.
    count_values is a list of values to count, e.g. count_values=[0,1,2] will return [count of 0s, count of 1s, count of 2s].
    If count_values is None, the return value of this function is [], key_count """
    count = 0
    total_value_counts = np.array([0] * len(count_values or []))
    for _, v in caffedb.iterator():
        count += 1
        if count_values is not None:
            array = to_numpy_matrix(v)
            current_count = np.array([0] * len(count_values))
            for i, val in enumerate(count_values):
                current_count[i] = np.sum(array == val)
            total_value_counts += current_count
    return total_value_counts, count


def to_numpy_matrix(v):
    """ Convert a leveldb/lmdb value to a numpy matrix of shape (datum.height, datum.width) """
    datum = caffe_pb2.Datum()
    datum.ParseFromString(v)
    # Three cases
    # 1- int imgs in data,
    # 2- int8 labels in data
    if len(datum.data) > 0:
        type_ = get_data_type(datum)
        matrix = np.fromstring(datum.data, dtype=type_)
    # 3- float imgs in float_data
    elif len(datum.float_data) > 0:
        matrix = np.array(datum.float_data)
    else:
        raise ValueError("Serialized datum has empty data and float_data.")
    matrix = matrix.reshape((datum.height, datum.width))
    return matrix


def norm_hounsfield_dyn(arr, c_min=0.1, c_max=0.3):
    """ Converts from hounsfield units to a float64 image with range 0.0 to 1.0 """
    # calc min and max
    amin, amax = np.amin(arr), np.amax(arr)
    arr = arr.astype(IMG_DTYPE)
    if amin <= 0:
        arr = np.clip(arr, amin * c_min, amax * c_max)
        # right shift to zero
        arr = np.abs(amin * c_min) + arr
    else:
        arr = np.clip(arr, amin, amax * c_max)
        # left shift to zero
        arr = arr - amin
    # normalization
    norm_fac = np.amax(arr)
    if norm_fac != 0:
        norm = np.divide(np.multiply(arr, 255), np.amax(arr))
    else:  # don't divide by 0
        norm = np.multiply(arr, 255)
    norm = np.clip(np.multiply(norm, 0.00390625), 0, 1)
    return norm


def norm_hounsfield_stat(arr, c_min=-100, c_max=200):
    amin = np.amin(arr)
    arr = np.array(arr, dtype=IMG_DTYPE)
    if amin <= 0:
        # clip
        c_arr = np.clip(arr, c_min, c_max)
        # right shift to zero
        slc_0 = np.add(np.abs(amin), c_arr)
    else:
        # clip
        c_arr = np.clip(arr, c_min, c_max)
        # left shift to zero
        slc_0 = np.subtract(c_arr, amin)
    # normalization
    norm_fac = np.amax(slc_0)
    if norm_fac != 0:
        norm = np.divide(np.multiply(slc_0, 255), np.amax(slc_0))
    else:  # don't divide by 0
        norm = np.multiply(slc_0, 255)
    norm = np.clip(np.multiply(norm, 0.00390625), 0, 1)
    return norm


class CaffeDatabase():
    """ Abstraction layer over lmdb and leveldb """

    def __init__(self, path, backend='lmdb'):
        self.backend = backend
        assert backend in ['lmdb', 'leveldb'], "Database backend not known: %s" % backend
        if backend == 'lmdb':
            self.db = lmdb.open(path)
        elif backend == 'leveldb':
            self.db = plyvel.DB(path)

    def iterator(self):
        if self.backend == 'lmdb':
            txn = self.db.begin()
            cursor = txn.cursor()
            it = cursor.iternext()
        elif self.backend == 'leveldb':
            it = self.db.iterator()
        return it
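A small usage sketch: counting how many voxels of each label are stored in a segmentation database, e.g. to check the class balance of a training set. The database path is illustrative:

db = CaffeDatabase('/home/guest/train_seg_lmdb', backend='lmdb')
value_counts, n_keys = find_keycount(db, count_values=[0, 1, 2])
print "keys:", n_keys, "- background/liver/lesion voxel counts:", value_counts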

Validation 5

import config

import logging
import os

import matplotlib
matplotlib.use('Agg')  # must be selected before pyplot is imported
from matplotlib import pyplot as plt

import scipy as sp
import scipy.misc, scipy.ndimage.interpolation

import caffe
caffe.set_mode_gpu()

from denseinference import CRFProcessor
from medpy import metric
import nibabel as nib
import numpy as np
import IPython

# this should actually be part of medpy. Apparently it isn't (anymore), so the surface.py file from
# http://pydoc.net/Python/MedPy/0.2.2/medpy.metric._surface/ should be manually imported
from surface import Surface
from utils import norm_hounsfield_stat, norm_hounsfield_dyn

IMG_DTYPE = np.float
SEG_DTYPE = np.uint8


def miccaiimshow(img, seg, preds, fname, titles=None, plot_separate_img=True):
    """ Takes a raw image img, seg in range 0-2, and a list of predictions in range 0-2 """
    plt.figure(figsize=(25, 25))
    ALPHA = 1
    n_plots = len(preds)
    subplot_offset = 0
    plt.set_cmap('gray')
    if plot_separate_img:
        n_plots += 1
        subplot_offset = 1
        plt.subplot(1, n_plots, 1)
        plt.subplots_adjust(wspace=0, hspace=0)
        plt.title("Image")
        plt.axis('off')
        plt.imshow(img, cmap="gray")
    if type(preds) != list:
        preds = [preds]
    for i, pred in enumerate(preds):
        # Order of overlay
        ########## OLD
        # lesion = pred==2
        # difflesion = set_minus(seg==2, lesion)
        # liver = set_minus(pred==1, [lesion, difflesion])
        # diffliver = set_minus(seg==1, [liver, lesion, difflesion])
        ##########
        lesion = pred == 2
        difflesion = np.logical_xor(seg == 2, lesion)
        liver = pred == 1
        diffliver = np.logical_xor(seg == 1, liver)
        plt.subplot(1, n_plots, i + 1 + subplot_offset)
        title = titles[i] if titles is not None and i < len(titles) else ""
        plt.title(title)
        plt.axis('off')
        plt.imshow(img); plt.hold(True)
        # Liver prediction
        plt.imshow(np.ma.masked_where(liver == 0, liver), cmap="Greens", vmin=0.1, vmax=1.2, alpha=ALPHA); plt.hold(True)
        # Liver: pixels in ground truth, not in prediction
        plt.imshow(np.ma.masked_where(diffliver == 0, diffliver), cmap="Spectral", vmin=0.1, vmax=2.2, alpha=ALPHA); plt.hold(True)
        # Lesion prediction
        plt.imshow(np.ma.masked_where(lesion == 0, lesion), cmap="Blues", vmin=0.1, vmax=1.2, alpha=ALPHA); plt.hold(True)
        # Lesion: pixels in ground truth, not in prediction
        plt.imshow(np.ma.masked_where(difflesion == 0, difflesion), cmap="Reds", vmin=0.1, vmax=1.5, alpha=ALPHA)
    plt.savefig(fname)
    plt.close()


def to_scale(img, shape=None):
    if shape is None:
        shape = config.slice_shape
    height, width = shape
    if img.dtype == SEG_DTYPE:
        return scipy.misc.imresize(img, (height, width), interp="nearest").astype(SEG_DTYPE)
    elif img.dtype == IMG_DTYPE:
        max_ = np.max(img)
        factor = 256.0 / max_ if max_ != 0 else 1
        return (scipy.misc.imresize(img, (height, width), interp="nearest") / factor).astype(IMG_DTYPE)
    else:
        raise TypeError('Error. To scale the image array, its type must be np.uint8 or np.float64. (' + str(img.dtype) + ')')


def histeq_processor(img):
    """ Histogram equalization """
    nbr_bins = 256
    # get image histogram
    imhist, bins = np.histogram(img.flatten(), nbr_bins, normed=True)
    cdf = imhist.cumsum()  # cumulative distribution function
    cdf = 255 * cdf / cdf[-1]  # normalize
    # use linear interpolation of cdf to find new pixel values
    original_shape = img.shape
    img = np.interp(img.flatten(), bins[:-1], cdf)
    img = img / 255.0
    return img.reshape(original_shape)


def downscale_img_label(imgvol, label_vol):
    """
    Downscales an image volume and a label volume. Normalizes the hounsfield units of the image volume.
    :param imgvol:
    :param label_vol:
    :return:
    """
    imgvol = imgvol.astype(IMG_DTYPE)
    label_vol = label_vol.astype(SEG_DTYPE)
    slc = None
    imgvol_downscaled = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2]))
    label_vol_downscaled = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2]))
    # Copy image volume
    # copy_imgvol = np.copy(imgvol)
    # Truncate metal and highly absorptive objects
    logging.info('Found ' + str(np.sum(imgvol > 1200)) + ' values > 1200 !!')
    imgvol[imgvol > 1200] = 0
    for i in range(imgvol.shape[2]):
        # Get the current slc, normalize and downscale
        slc = imgvol[:, :, i]
        if config.ct_window_type == 'dyn':
            slc = norm_hounsfield_dyn(slc, c_min=config.ct_window_type_min, c_max=config.ct_window_type_max)
        elif config.ct_window_type == 'stat':
            slc = norm_hounsfield_stat(slc, c_min=config.ct_window_type_min, c_max=config.ct_window_type_max)
        else:
            print "CT Windowing did not work."
        slc = to_scale(slc, config.slice_shape)
        # slc = histeq_processor(slc)
        imgvol_downscaled[:, :, i] = slc
        # downscale the label slc for the crf
        label_vol_downscaled[:, :, i] = to_scale(label_vol[:, :, i], config.slice_shape)
    return [imgvol_downscaled, label_vol_downscaled]


def scorer(pred, label):
    """ Computes Dice, Jaccard, VOE and RVD between a predicted and a ground-truth volume.
    :param pred:
    :param label:
    :return:
    """
    volscores = {}
    volscores['dice'] = metric.dc(pred, label)
    volscores['jaccard'] = metric.binary.jc(pred, label)
    volscores['voe'] = 1. - volscores['jaccard']
    volscores['rvd'] = metric.ravd(label, pred)
    if np.count_nonzero(pred) == 0 or np.count_nonzero(label) == 0:
        volscores['assd'] = 0
        volscores['msd'] = 0
    # else:
    #     evalsurf = Surface(pred, label, physical_voxel_spacing=vxlspacing, mask_offset=[0., 0., 0.],
    #                        reference_offset=[0., 0., 0.])
    #     volscores['assd'] = evalsurf.get_average_symmetric_surface_distance()
    #     volscores['msd'] = metric.hd(label, pred, voxelspacing=vxlspacing)
    logging.info("\tDice " + str(volscores['dice']))
    logging.info("\tJaccard " + str(volscores['jaccard']))
    logging.info("\tVOE " + str(volscores['voe']))
    logging.info("\tRVD " + str(volscores['rvd']))
    # logging.info("\tASSD " + str(volscores['assd']))
    # logging.info("\tMSD " + str(volscores['msd']))
    return volscores


def get_average_score(scorelist, scorename, mode=None):
    """ Averages the given score over a list of per-volume score dicts.
    :param scorelist:
    :param scorename:
    :return:
    """
    score = 0.
    for e in scorelist:
        val = e.get(scorename, 0.)  # assd/msd may be missing when surface scoring is skipped
        if mode == 'abs':
            score += np.abs(val)
        else:
            score += val
    score /= float(len(scorelist))
    return score


def zoomliver_UNET_processor(img, seg):
    """ Custom preprocessing of img, seg for the UNET architecture:
    Crops the background and upsamples the found patch. """
    # Remove background!
    img = np.multiply(img, np.clip(seg, 0, 1))
    # get patch size
    col_maxes = np.max(seg, axis=0)  # a row
    row_maxes = np.max(seg, axis=1)  # a column
    nonzero_colmaxes = np.nonzero(col_maxes)[0]
    nonzero_rowmaxes = np.nonzero(row_maxes)[0]
    x1, x2 = nonzero_colmaxes[0], nonzero_colmaxes[-1]
    y1, y2 = nonzero_rowmaxes[0], nonzero_rowmaxes[-1]
    width = x2 - x1
    height = y2 - y1
    MIN_WIDTH = 60
    MIN_HEIGHT = 60
    x_pad = (MIN_WIDTH - width) / 2 if width < MIN_WIDTH else 0
    y_pad = (MIN_HEIGHT - height) / 2 if height < MIN_HEIGHT else 0
    x1 = max(0, x1 - x_pad)
    x2 = min(img.shape[1], x2 + x_pad)
    y1 = max(0, y1 - y_pad)
    y2 = min(img.shape[0], y2 + y_pad)
    img = img[y1:y2 + 1, x1:x2 + 1]
    seg = seg[y1:y2 + 1, x1:x2 + 1]
    img = to_scale(img, (388, 388))
    seg = to_scale(seg, (388, 388))
    # All non-lesion is background
    seg[seg == 1] = 0
    # The lesion label becomes 1
    seg[seg == 2] = 1
    # Now do padding for UNET, which takes 572x572
    # seg = np.pad(seg, ((92, 92), (92, 92)), mode='reflect')
    img = np.pad(img, 92, mode='reflect')
    return img, (x1, x2, y1, y2)


if __name__ == '__main__':
    try:
        logging.basicConfig(filename=os.path.join(config.output_dir, config.logfile), filemode='w',
                            level=config.log_level, format='%(asctime)s %(levelname)s:%(message)s',
                            datefmt='%d-%m-%Y %I:%M:%S %p')
        # lists to calculate the overall score over all folds from, i.e. they hold the scores of all volumes
        overall_score_liver = []
        overall_score_lesion_crf = []
        overall_score_liver_crf = []
        overall_score_lesion = []

        # Iterate folds and corresponding models
        for fold, model, deployprototxt, model_step_two, deployprototxt_step_two in zip(config.dataset, config.models,
                                                                                        config.deployprototxt,
                                                                                        config.models_step_two,
                                                                                        config.deployprototxt_step_two):
            logging.info("Starting new fold")

            # Lists to save the scores for each volume of this fold
            foldscore_lesion_crf = []
            foldscore_liver_crf = []
            foldscore_liver = []
            foldscore_lesion = []

            # Iterate volumes in fold
            for volidx, volpaths in enumerate(fold):
                logging.info("Loading Network for Step 1")
                # load a new network for this fold
                try:
                    del net  # it is a good idea to delete the net object to free up memory before instantiating another one
                    net = caffe.Net(deployprototxt, model, caffe.TEST)
                except NameError:
                    net = caffe.Net(deployprototxt, model, caffe.TEST)

                logging.info("Loading " + volpaths[1])
                imgvol = np.load(volpaths[1])
                labelvol = np.load(volpaths[2])

                # the raw probabilities of step 1
                probvol = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2], 2))
                # the probabilities of step 2, scaled back down into the volume
                pred_step_two = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2]))
                pred_step_one = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2]))
                probvol_step_two = np.zeros((config.slice_shape[0], config.slice_shape[1], imgvol.shape[2], 2))

                # rotate volumes so that the network sees them in the same orientation as during training
                imgvol = np.rot90(imgvol)
                labelvol = np.rot90(labelvol)

                imgvol_downscaled, labelvol_downscaled = downscale_img_label(imgvol, labelvol)

                # iterate slices in the volume and do prediction
                logging.info("Predicting " + volpaths[1])
                for i in range(imgvol_downscaled.shape[2]):
                    slc = imgvol_downscaled[:, :, i]
                    # create mirrored slc for unet
                    slc = np.pad(slc, ((92, 92), (92, 92)), mode='reflect')
                    # load slc into the network and do a forward pass
                    net.blobs['data'].data[...] = slc
                    net.forward()
                    # now save the raw probabilities
                    probvol[:, :, i, :] = net.blobs['prob'].data.transpose((0, 2, 3, 1))[0]
                    pred_step_one[:, :, i] = np.argmax(probvol[:, :, i, :], axis=2)
                    # result shape is batch_img_idx, height, width, probability_of_class
                    # dump probabilities to a .npy file for future use
                    # np.save('./probfiles/' + ))  ## FIX THIS

                logging.info("Here are the liver scores before CRF:")
                # calculate scores for the liver
                pred_to_use = np.logical_or(probvol.argmax(3) == 1, probvol.argmax(3) == 2)
                label_to_use = np.logical_or(labelvol_downscaled == 1, labelvol_downscaled == 2)
                # voxelspacing = volpaths[3]
                volumescore_liver = scorer(pred_to_use, label_to_use)

                # Run Liver CRF
                logging.info("Now running CRF on Liver")
                crfparams = {'max_iterations': 10, 'dynamic_z': True, 'ignore_memory': True, 'pos_x_std': 1.5,
                             'pos_y_std': 1.5,
                             'pos_z_std': 1.5, 'pos_w': 3.0, 'bilateral_x_std': 9.0, 'bilateral_y_std': 9.0,
                             'bilateral_z_std': 9.0, 'bilateral_intensity_std': 20.0, 'bilateral_w': 10.0}
                pro = CRFProcessor.CRF3DProcessor(**crfparams)

                if config.save_probability_volumes:
                    np.save(os.path.join(config.output_dir, os.path.basename(volpaths[1])) + ".liver.npy", probvol)
                crf_pred_liver = pro.set_data_and_run(imgvol_downscaled, probvol)

                # calculate scores for the liver
                label_to_use = np.logical_or(labelvol_downscaled == 1, labelvol_downscaled == 2)
                logging.info("Here are the liver scores after CRF:")
                volumescore_liver_crf = scorer(crf_pred_liver, label_to_use)

                # calculate scores for lesions
                # pred_to_use = probvol.argmax(3)==2
                # label_to_use = labelvol_downscaled==2
                # volumescore_lesion = scorer(pred_to_use, label_to_use, voxelspacing)

                # OK, we're done with the first step of the cascaded networks and have evaluated it.
                # Now let's get to the second step.
                del net
                logging.info("Deleted network for cascade step 1")
                net = caffe.Net(deployprototxt_step_two, model_step_two, caffe.TEST)
                logging.info("Loaded network for cascade step 2")

                # we again iterate over all slices in the volume
                for i in range(imgvol_downscaled.shape[2]):
                    slc = imgvol_downscaled[:, :, i]
                    # create mirrored slc for unet
                    # slc = np.pad(slc, ((92,92),(92,92)), mode='reflect')
                    # now we crop and upscale the liver
                    slc_crf_pred_liver = crf_pred_liver[:, :, i].astype(SEG_DTYPE)
                    # slc_crf_pred_liver = pred_to_use[:,:,i].astype(SEG_DTYPE)
                    # slc_crf_pred_liver = labelvol_downscaled[:,:,i]
                    if np.count_nonzero(slc_crf_pred_liver) == 0:
                        probvol_step_two[:, :, i, :] = 0
                    else:
                        slc, bbox = zoomliver_UNET_processor(slc, slc_crf_pred_liver)
                        # load slc into the network and do a forward pass
                        net.blobs['data'].data[...] = slc
                        net.forward()

                        # scale the output back down and insert it into the probability volume
                        x1, x2, y1, y2 = bbox
                        leftpad, rightpad = x1, 388 - x2
                        toppad, bottompad = y1, 388 - y2
                        width, height = int(x2 - x1), int(y2 - y1)
                        # now save the probabilities
                        prob = net.blobs['prob'].data.transpose((0, 2, 3, 1))[0]
                        # probvol[:,:,i,:] = prob
                        slc_pred_step_two = np.argmax(prob, axis=2).astype(SEG_DTYPE)
                        slc_pred_step_two = to_scale(slc_pred_step_two, (height, width))
                        slc_pred_step_two = np.pad(slc_pred_step_two, ((toppad, bottompad), (leftpad, rightpad)),
                                                   mode='constant')
                        pred_step_two[:, :, i] = slc_pred_step_two

                        prob0 = prob[:, :, 0].astype(IMG_DTYPE)  # use IMG_DTYPE because these are probabilities, not hard labels
                        prob0 = to_scale(prob0, (height, width))
                        prob0 = np.pad(prob0, ((toppad, bottompad), (leftpad, rightpad)), mode='constant')
                        prob1 = prob[:, :, 1].astype(IMG_DTYPE)
                        prob1 = to_scale(prob1, (height, width))
                        prob1 = np.pad(prob1, ((toppad, bottompad), (leftpad, rightpad)), mode='constant')
                        probvol_step_two[:, :, i, 0] = prob0
                        probvol_step_two[:, :, i, 1] = prob1
                        # probvol_step_two[bbox[0]:bbox[0] + bbox[1], bbox[2]:bbox[2] + bbox[3], i, :] =

                logging.info("Lesion scores after step 2 before CRF")
                # pred_to_use = probvol_step_two.argmax(3) == 2
                pred_to_use = pred_step_two.astype(SEG_DTYPE)
                label_to_use = labelvol_downscaled == 2
                volumescore_lesion = scorer(pred_to_use, label_to_use)

                # Save lesion npy probabilities
                if config.save_probability_volumes:
                    np.save(os.path.join(config.output_dir, os.path.basename(volpaths[1])) + ".lesion.npy",
                            probvol_step_two)

                ### SAVE PLOTS
                if config.plot_every_n_slices > 0:
                    for i in range(0, imgvol_downscaled.shape[2], config.plot_every_n_slices):
                        pred_vol_bothsteps = pred_step_one
                        pred_vol_bothsteps[pred_step_two == 1] = 2
                        liverdc = metric.dc(pred_step_one[:, :, i], labelvol_downscaled[:, :, i] == 1)
                        lesiondc = metric.dc(pred_step_two[:, :, i], labelvol_downscaled[:, :, i] == 2)
                        fname = os.path.join(config.output_dir, os.path.basename(volpaths[1]))
                        fname += "_slc" + str(i) + "_"
                        fname += "liv" + str(liverdc) + "_les" + str(lesiondc) + ".png"
                        # logging.info("Plotting " + fname)
                        miccaiimshow(imgvol_downscaled[:, :, i], labelvol_downscaled[:, :, i],
                                     [labelvol_downscaled[:, :, i], pred_vol_bothsteps[:, :, i]], fname=fname,
                                     titles=["Ground Truth", "Prediction"], plot_separate_img=True)

                logging.info("Now running LESION CRF on Liver")
                crf_params = {'ignore_memory': True, 'bilateral_intensity_std': 0.16982742320252908,
                              'bilateral_w': 6.406401876489639,
                              'pos_w': 2.3422381267344132, 'bilateral_x_std': 284.5377968491542,
                              'pos_x_std': 23.636281254341867,
                              'max_iterations': 10}
                pro = CRFProcessor.CRF3DProcessor(**crf_params)
                crf_pred_lesion = pro.set_data_and_run(imgvol_downscaled, probvol_step_two)
                volumescore_lesion_crf = scorer(crf_pred_lesion, label_to_use)

                # Append to the results lists so that the average scores can be calculated later
                foldscore_liver.append(volumescore_liver)
                foldscore_lesion.append(volumescore_lesion)
                foldscore_liver_crf.append(volumescore_liver_crf)
                foldscore_lesion_crf.append(volumescore_lesion_crf)
                overall_score_liver_crf.append(volumescore_liver_crf)
                overall_score_lesion_crf.append(volumescore_lesion_crf)
                overall_score_liver.append(volumescore_liver)
                overall_score_lesion.append(volumescore_lesion)

            logging.info("=========================================")
            logging.info("Average Liver Scores before CRF for this fold: ")
            logging.info("Dice " + str(get_average_score(foldscore_liver, 'dice')))
            logging.info("Jaccard " + str(get_average_score(foldscore_liver, 'jaccard')))
            logging.info("VOE " + str(get_average_score(foldscore_liver, 'voe')))
            logging.info("RVD " + str(get_average_score(foldscore_liver, 'rvd')))
            # logging.info("ASSD " + str(get_average_score(foldscore_liver, 'assd')))
            # logging.info("MSD " + str(get_average_score(foldscore_liver, 'msd')))
            logging.info("=========================================")
            logging.info("=========================================")
            logging.info("Average Liver Scores after CRF for this fold: ")
            logging.info("Dice " + str(get_average_score(foldscore_liver_crf, 'dice')))
            logging.info("Jaccard " + str(get_average_score(foldscore_liver_crf, 'jaccard')))
            logging.info("VOE " + str(get_average_score(foldscore_liver_crf, 'voe')))
            logging.info("RVD " + str(get_average_score(foldscore_liver_crf, 'rvd')))
            # logging.info("ASSD " + str(get_average_score(foldscore_liver_crf, 'assd')))
            # logging.info("MSD " + str(get_average_score(foldscore_liver_crf, 'msd')))
            logging.info("=========================================")
            logging.info("=========================================")
            logging.info("Average Lesion Scores before CRF for this fold: ")
            logging.info("Dice " + str(get_average_score(foldscore_lesion, 'dice')))
            logging.info("Jaccard " + str(get_average_score(foldscore_lesion, 'jaccard')))
            logging.info("VOE " + str(get_average_score(foldscore_lesion, 'voe')))
            logging.info("RVD " + str(get_average_score(foldscore_lesion, 'rvd')))
            # logging.info("ASSD " + str(get_average_score(foldscore_lesion, 'assd')))
            # logging.info("MSD " + str(get_average_score(foldscore_lesion, 'msd')))
            logging.info("=========================================")
            logging.info("=========================================")
            logging.info("Average Lesion Scores AFTER CRF for this fold: ")
            logging.info("Dice " + str(get_average_score(foldscore_lesion_crf, 'dice')))
            logging.info("Jaccard " + str(get_average_score(foldscore_lesion_crf, 'jaccard')))
            logging.info("VOE " + str(get_average_score(foldscore_lesion_crf, 'voe')))
            logging.info("RVD " + str(get_average_score(foldscore_lesion_crf, 'rvd')))
            # logging.info("ASSD " + str(get_average_score(foldscore_lesion_crf, 'assd')))
            # logging.info("MSD " + str(get_average_score(foldscore_lesion_crf, 'msd')))
            logging.info("=========================================")

        logging.info("=========================================")
        logging.info("DONE WITH PROCESSING ALL FOLDS. NOW THE OVERALL RESULTS COME")
        logging.info("=========================================")
        logging.info("Average Liver Scores before CRF overall: ")
        logging.info("Dice " + str(get_average_score(overall_score_liver, 'dice')))
        logging.info("Jaccard " + str(get_average_score(overall_score_liver, 'jaccard')))
        logging.info("VOE " + str(get_average_score(overall_score_liver, 'voe')))
        logging.info("RVD " + str(get_average_score(overall_score_liver, 'rvd', mode='abs')))
        logging.info("ASSD " + str(get_average_score(overall_score_liver, 'assd')))
        # logging.info("MSD " + str(get_average_score(overall_score_liver, 'msd')))
        logging.info("=========================================")
        logging.info("=========================================")
        logging.info("Average Liver Scores after CRF overall: ")
        logging.info("Dice " + str(get_average_score(overall_score_liver_crf, 'dice')))
        logging.info("Jaccard " + str(get_average_score(overall_score_liver_crf, 'jaccard')))
        logging.info("VOE " + str(get_average_score(overall_score_liver_crf, 'voe')))
        logging.info("RVD " + str(get_average_score(overall_score_liver_crf, 'rvd', mode='abs')))
        # logging.info("ASSD " + str(get_average_score(overall_score_liver_crf, 'assd')))
        # logging.info("MSD " + str(get_average_score(overall_score_liver_crf, 'msd')))
        logging.info("=========================================")
        logging.info("=========================================")
        logging.info("Average Lesion Scores before step2 CRF overall: ")
        logging.info("Dice " + str(get_average_score(overall_score_lesion, 'dice')))
        logging.info("Jaccard " + str(get_average_score(overall_score_lesion, 'jaccard')))
        logging.info("VOE " + str(get_average_score(overall_score_lesion, 'voe')))
        logging.info("RVD " + str(get_average_score(overall_score_lesion, 'rvd', mode='abs')))
        # logging.info("ASSD " + str(get_average_score(overall_score_lesion, 'assd')))
        # logging.info("MSD " + str(get_average_score(overall_score_lesion, 'msd')))
        logging.info("=========================================")
        logging.info("=========================================")
        logging.info("Average Lesion Scores after step2 CRF overall: ")
        logging.info("Dice " + str(get_average_score(overall_score_lesion_crf, 'dice')))
        logging.info("Jaccard " + str(get_average_score(overall_score_lesion_crf, 'jaccard')))
        logging.info("VOE " + str(get_average_score(overall_score_lesion_crf, 'voe')))
        logging.info("RVD " + str(get_average_score(overall_score_lesion_crf, 'rvd', mode='abs')))
        # logging.info("ASSD " + str(get_average_score(overall_score_lesion_crf, 'assd')))
        # logging.info("MSD " + str(get_average_score(overall_score_lesion_crf, 'msd')))
        logging.info("=========================================")

        # Creating the CSV
        csvarray = np.zeros((len(overall_score_liver), 13))
        csvarray[:, 0] = range(1, len(overall_score_liver) + 1)
        # The original code iterated overall_score_liver.iteritems(), but these are lists of
        # per-volume score dicts; build the columns from the dicts instead.
        score_names = ['dice', 'jaccard', 'voe', 'rvd', 'assd', 'msd']
        for i, name in enumerate(score_names):
            csvarray[:, i + 1] = [s.get(name, 0.) for s in overall_score_liver]
            csvarray[:, i + 7] = [s.get(name, 0.) for s in overall_score_lesion]
        np.savetxt("Numbers.csv", csvarray, delimiter=",")
    except:
        logging.exception("Exception happened...")
        IPython.embed()
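A toy check of the bounding-box round trip used in cascade step 2 above: the step-2 prediction is made on the upsampled liver crop, scaled back to the crop size, and then padded back into its position inside the 388x388 slice. All numbers here are hypothetical:

import numpy as np

x1, x2, y1, y2 = 100, 260, 80, 240  # hypothetical liver bbox from step 1
width, height = x2 - x1, y2 - y1
pred_crop = np.ones((height, width), dtype=np.uint8)  # stands in for the downscaled step-2 prediction
leftpad, rightpad = x1, 388 - x2
toppad, bottompad = y1, 388 - y2
full = np.pad(pred_crop, ((toppad, bottompad), (leftpad, rightpad)), mode='constant')
print full.shape  # -> (388, 388): the crop is back in place, zeros elsewhere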
  30. win7 正在配置 请勿关闭计算机,怎么办Win7开机显示正在配置Windows Update请勿关机...

    置信有很多用户都跟小编一样遇到过这样的问题&#xff0c;电脑时发现开机屏幕显现“正在配置Windows Update&#xff0c;请勿关机”(如下图所示)&#xff0c;而且还需求等大约5分钟才干进入系统。这是怎样回事呢&#xff1f;一切都是正常操作的&#xff0c;为什么开时机呈现“正…...

    2022/11/19 21:17:13
  31. 准备配置windows 请勿关闭计算机 蓝屏,Win7开机总是出现提示“配置Windows请勿关机”...

    Win7系统开机启动时总是出现“配置Windows请勿关机”的提示&#xff0c;没过几秒后电脑自动重启&#xff0c;每次开机都这样无法进入系统&#xff0c;此时碰到这种现象的用户就可以使用以下5种方法解决问题。方法一&#xff1a;开机按下F8&#xff0c;在出现的Windows高级启动选…...

    2022/11/19 21:17:12
  32. 准备windows请勿关闭计算机要多久,windows10系统提示正在准备windows请勿关闭计算机怎么办...

    有不少windows10系统用户反映说碰到这样一个情况&#xff0c;就是电脑提示正在准备windows请勿关闭计算机&#xff0c;碰到这样的问题该怎么解决呢&#xff0c;现在小编就给大家分享一下windows10系统提示正在准备windows请勿关闭计算机的具体第一种方法&#xff1a;1、2、依次…...

    2022/11/19 21:17:11
  33. 配置 已完成 请勿关闭计算机,win7系统关机提示“配置Windows Update已完成30%请勿关闭计算机”的解决方法...

    今天和大家分享一下win7系统重装了Win7旗舰版系统后&#xff0c;每次关机的时候桌面上都会显示一个“配置Windows Update的界面&#xff0c;提示请勿关闭计算机”&#xff0c;每次停留好几分钟才能正常关机&#xff0c;导致什么情况引起的呢&#xff1f;出现配置Windows Update…...

    2022/11/19 21:17:10
  34. 电脑桌面一直是清理请关闭计算机,windows7一直卡在清理 请勿关闭计算机-win7清理请勿关机,win7配置更新35%不动...

    只能是等着&#xff0c;别无他法。说是卡着如果你看硬盘灯应该在读写。如果从 Win 10 无法正常回滚&#xff0c;只能是考虑备份数据后重装系统了。解决来方案一&#xff1a;管理员运行cmd&#xff1a;net stop WuAuServcd %windir%ren SoftwareDistribution SDoldnet start WuA…...

    2022/11/19 21:17:09
  35. 计算机配置更新不起,电脑提示“配置Windows Update请勿关闭计算机”怎么办?

    原标题&#xff1a;电脑提示“配置Windows Update请勿关闭计算机”怎么办&#xff1f;win7系统中在开机与关闭的时候总是显示“配置windows update请勿关闭计算机”相信有不少朋友都曾遇到过一次两次还能忍但经常遇到就叫人感到心烦了遇到这种问题怎么办呢&#xff1f;一般的方…...

    2022/11/19 21:17:08
  36. 计算机正在配置无法关机,关机提示 windows7 正在配置windows 请勿关闭计算机 ,然后等了一晚上也没有关掉。现在电脑无法正常关机...

    关机提示 windows7 正在配置windows 请勿关闭计算机 &#xff0c;然后等了一晚上也没有关掉。现在电脑无法正常关机以下文字资料是由(历史新知网www.lishixinzhi.com)小编为大家搜集整理后发布的内容&#xff0c;让我们赶快一起来看一下吧&#xff01;关机提示 windows7 正在配…...

    2022/11/19 21:17:05
  37. 钉钉提示请勿通过开发者调试模式_钉钉请勿通过开发者调试模式是真的吗好不好用...

    钉钉请勿通过开发者调试模式是真的吗好不好用 更新时间:2020-04-20 22:24:19 浏览次数:729次 区域: 南阳 > 卧龙 列举网提醒您:为保障您的权益,请不要提前支付任何费用! 虚拟位置外设器!!轨迹模拟&虚拟位置外设神器 专业用于:钉钉,外勤365,红圈通,企业微信和…...

    2022/11/19 21:17:05
  38. 配置失败还原请勿关闭计算机怎么办,win7系统出现“配置windows update失败 还原更改 请勿关闭计算机”,长时间没反应,无法进入系统的解决方案...

    前几天班里有位学生电脑(windows 7系统)出问题了&#xff0c;具体表现是开机时一直停留在“配置windows update失败 还原更改 请勿关闭计算机”这个界面&#xff0c;长时间没反应&#xff0c;无法进入系统。这个问题原来帮其他同学也解决过&#xff0c;网上搜了不少资料&#x…...

    2022/11/19 21:17:04
  39. 一个电脑无法关闭计算机你应该怎么办,电脑显示“清理请勿关闭计算机”怎么办?...

    本文为你提供了3个有效解决电脑显示“清理请勿关闭计算机”问题的方法&#xff0c;并在最后教给你1种保护系统安全的好方法&#xff0c;一起来看看&#xff01;电脑出现“清理请勿关闭计算机”在Windows 7(SP1)和Windows Server 2008 R2 SP1中&#xff0c;添加了1个新功能在“磁…...

    2022/11/19 21:17:03
  40. 请勿关闭计算机还原更改要多久,电脑显示:配置windows更新失败,正在还原更改,请勿关闭计算机怎么办...

    许多用户在长期不使用电脑的时候&#xff0c;开启电脑发现电脑显示&#xff1a;配置windows更新失败&#xff0c;正在还原更改&#xff0c;请勿关闭计算机。。.这要怎么办呢&#xff1f;下面小编就带着大家一起看看吧&#xff01;如果能够正常进入系统&#xff0c;建议您暂时移…...

    2022/11/19 21:17:02
  41. 还原更改请勿关闭计算机 要多久,配置windows update失败 还原更改 请勿关闭计算机,电脑开机后一直显示以...

    配置windows update失败 还原更改 请勿关闭计算机&#xff0c;电脑开机后一直显示以以下文字资料是由(历史新知网www.lishixinzhi.com)小编为大家搜集整理后发布的内容&#xff0c;让我们赶快一起来看一下吧&#xff01;配置windows update失败 还原更改 请勿关闭计算机&#x…...

    2022/11/19 21:17:01
  42. 电脑配置中请勿关闭计算机怎么办,准备配置windows请勿关闭计算机一直显示怎么办【图解】...

    不知道大家有没有遇到过这样的一个问题&#xff0c;就是我们的win7系统在关机的时候&#xff0c;总是喜欢显示“准备配置windows&#xff0c;请勿关机”这样的一个页面&#xff0c;没有什么大碍&#xff0c;但是如果一直等着的话就要两个小时甚至更久都关不了机&#xff0c;非常…...

    2022/11/19 21:17:00
  43. 正在准备配置请勿关闭计算机,正在准备配置windows请勿关闭计算机时间长了解决教程...

    当电脑出现正在准备配置windows请勿关闭计算机时&#xff0c;一般是您正对windows进行升级&#xff0c;但是这个要是长时间没有反应&#xff0c;我们不能再傻等下去了。可能是电脑出了别的问题了&#xff0c;来看看教程的说法。正在准备配置windows请勿关闭计算机时间长了方法一…...

    2022/11/19 21:16:59
  44. 配置失败还原请勿关闭计算机,配置Windows Update失败,还原更改请勿关闭计算机...

    我们使用电脑的过程中有时会遇到这种情况&#xff0c;当我们打开电脑之后&#xff0c;发现一直停留在一个界面&#xff1a;“配置Windows Update失败&#xff0c;还原更改请勿关闭计算机”&#xff0c;等了许久还是无法进入系统。如果我们遇到此类问题应该如何解决呢&#xff0…...

    2022/11/19 21:16:58
  45. 如何在iPhone上关闭“请勿打扰”

    Apple’s “Do Not Disturb While Driving” is a potentially lifesaving iPhone feature, but it doesn’t always turn on automatically at the appropriate time. For example, you might be a passenger in a moving car, but your iPhone may think you’re the one dri…...

    2022/11/19 21:16:57