Training different scikit-learn classifiers on multiple CPUs for each iteration
I have a script that randomly generates a dataset and trains several classifiers to compare them against each other (it is very similar to http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html):
from itertools import product
import numpy as np
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn

names = ["Linear SVM", "Decision Tree",
         "Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
         "Quadratic Discriminant Analysis"]

def griddy_mcsearchface(num_samples, num_feats, num_feats_to_remove):
    classifiers = [
        SVC(kernel="linear", C=0.025),
        DecisionTreeClassifier(max_depth=5),
        RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
        AdaBoostClassifier(), GaussianNB(),
        LinearDiscriminantAnalysis(),
        QuadraticDiscriminantAnalysis()]
    classifiers2 = [
        SVC(kernel="linear", C=0.025),
        DecisionTreeClassifier(max_depth=5),
        RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
        AdaBoostClassifier(), GaussianNB(),
        LinearDiscriminantAnalysis(),
        QuadraticDiscriminantAnalysis()]
    X, y = make_classification(n_samples=num_samples, n_features=num_feats, n_redundant=0, n_informative=2,
                               random_state=1, n_clusters_per_class=1)
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
    for name, clf, clf2 in zip(names, classifiers, classifiers2):
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Train the second copy with the last num_feats_to_remove features dropped.
        clf2.fit(X_train[:, :-num_feats_to_remove], y_train)
        score2 = clf2.score(X_test[:, :-num_feats_to_remove], y_test)
        yield (num_samples, num_feats, num_feats_to_remove, name, score, score2)
And then run it:
_samples = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000]
_feats = [10, 20, 50, 100, 200, 500, 10000]
_feats_to_rm = [5, 10, 25, 50, 100, 250]
for num_samples, num_feats, num_feats_to_remove in product(_samples, _feats, _feats_to_rm):
    if num_feats <= num_feats_to_remove:
        continue
    for i in griddy_mcsearchface(num_samples, num_feats, num_feats_to_remove):
        print(i)
The script outputs something like this:
(100, 10, 5, 'Linear SVM', 1.0, 0.40000000000000002)
(100, 10, 5, 'Decision Tree', 1.0, 0.65000000000000002)
(100, 10, 5, 'Random Forest', 1.0, 0.90000000000000002)
(100, 10, 5, 'AdaBoost', 1.0, 0.65000000000000002)
(100, 10, 5, 'Naive Bayes', 1.0, 0.75)
(100, 10, 5, 'Linear Discriminant Analysis', 1.0, 0.40000000000000002)
(100, 10, 5, 'Quadratic Discriminant Analysis', 1.0, 0.84999999999999998)
(100, 20, 5, 'Linear SVM', 1.0, 1.0)
(100, 20, 5, 'Decision Tree', 0.94999999999999996, 0.94999999999999996)
(100, 20, 5, 'Random Forest', 0.80000000000000004, 0.75)
(100, 20, 5, 'AdaBoost', 1.0, 0.94999999999999996)
(100, 20, 5, 'Naive Bayes', 1.0, 1.0)
(100, 20, 5, 'Linear Discriminant Analysis', 1.0, 1.0)
(100, 20, 5, 'Quadratic Discriminant Analysis', 0.84999999999999998, 0.94999999999999996)
(100, 20, 10, 'Linear SVM', 0.94999999999999996, 0.65000000000000002)
(100, 20, 10, 'Decision Tree', 0.94999999999999996, 0.59999999999999998)
(100, 20, 10, 'Random Forest', 0.75, 0.69999999999999996)
(100, 20, 10, 'AdaBoost', 0.94999999999999996, 0.69999999999999996)
(100, 20, 10, 'Naive Bayes', 0.94999999999999996, 0.75)
But clf.fit() runs single-threaded.
Assuming that I have enough threads to run all the classifiers for each iteration, how would I be able to train the classifiers using different threads for every iteration of for num_samples, num_feats, num_feats_to_remove in product(_samples, _feats, _feats_to_rm)?
And if I am limited to 4 or 8 threads but need to train >4 or >8 classifiers per iteration, how is it done?
This is not an answer per se, but a rough answer to your first question:
How would I be able to train the classifiers using different threads for every iteration of for num_samples, num_feats, num_feats_to_remove in product(_samples, _feats, _feats_to_rm)?
I assume that you mean that for every iteration of for name, clf, clf2 in zip(names, classifiers, classifiers2): you want clf and clf2 trained on different processors.
Here is some working code as a starting point (it is poorly implemented, but the general idea is there):
from itertools import product
import multiprocessing
import numpy as np
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn

names = ["Linear SVM", "Decision Tree",
         "Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
         "Quadratic Discriminant Analysis"]

def mp_worker(args):
    # Python 3 removed tuple parameters in function signatures,
    # so the task tuple is unpacked manually here.
    name, clf, X_train, y_train, X_test, y_test, num_feats_to_remove = args
    if num_feats_to_remove is False:
        clf.fit(X_train, y_train)
        return ('score1', clf.score(X_test, y_test))
    clf.fit(X_train[:, :-num_feats_to_remove], y_train)
    return ('score2', clf.score(X_test[:, :-num_feats_to_remove], y_test))

def griddy_mcsearchface(num_samples, num_feats, num_feats_to_remove):
    classifiers = [
        SVC(kernel="linear", C=0.025),
        DecisionTreeClassifier(max_depth=5),
        RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
        AdaBoostClassifier(), GaussianNB(),
        LinearDiscriminantAnalysis(),
        QuadraticDiscriminantAnalysis()]
    classifiers2 = [
        SVC(kernel="linear", C=0.025),
        DecisionTreeClassifier(max_depth=5),
        RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
        AdaBoostClassifier(), GaussianNB(),
        LinearDiscriminantAnalysis(),
        QuadraticDiscriminantAnalysis()]
    X, y = make_classification(n_samples=num_samples, n_features=num_feats, n_redundant=0, n_informative=2,
                               random_state=1, n_clusters_per_class=1)
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
    # One pool of two workers: one processor per classifier in each pair. The
    # pool is created once and reused, rather than leaked on every iteration.
    with multiprocessing.Pool(2) as p:
        for name, clf, clf2 in zip(names, classifiers, classifiers2):
            # The second task trains clf2 (not clf again) on the reduced feature set.
            data = ((name, clf, X_train, y_train, X_test, y_test, False),
                    (name, clf2, X_train, y_train, X_test, y_test, num_feats_to_remove))
            res = p.map(mp_worker, data)  # runs the two fits on two separate processes
            for label, score in res:  # parse the results
                if label == 'score1':
                    score1 = score
                else:
                    score2 = score
            yield (num_samples, num_feats, num_feats_to_remove, name, score1, score2)

if __name__ == '__main__':
    _samples = [100, 200]
    _feats = [10, 20]
    _feats_to_rm = [5, 10]
    for num_samples, num_feats, num_feats_to_remove in product(_samples, _feats, _feats_to_rm):
        if num_feats <= num_feats_to_remove:
            continue
        for i in griddy_mcsearchface(num_samples, num_feats, num_feats_to_remove):
            print(i)
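Two things worth knowing about this pattern: p.map blocks until both fits in a pair are done, so the two copies of each classifier train in parallel, but successive pairs still run one after another. Also, because multiprocessing pickles the arguments into the worker processes, each clf is fitted in a child process; the copies in the parent stay unfitted, and only the returned scores come back.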
If I have misunderstood your question, then the general principle in the code above can be modified to suit your needs. I derived the code above from the accepted answer here.
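As for your second question (being capped at 4 or 8 workers while needing more than 4 or 8 fits per iteration): nothing special is required, because Pool.map queues the tasks and hands them to workers as they become free. Below is a minimal sketch under that assumption, reusing mp_worker and the variables from inside griddy_mcsearchface above; the pool size of 4 is illustrative:

# Build one flat task list for the whole iteration:
# 7 classifiers x 2 feature sets = 14 fits.
tasks = []
for name, clf, clf2 in zip(names, classifiers, classifiers2):
    tasks.append((name, clf, X_train, y_train, X_test, y_test, False))
    tasks.append((name, clf2, X_train, y_train, X_test, y_test, num_feats_to_remove))

# 14 tasks but only 4 worker processes: the pool runs 4 at a time
# and the remaining tasks wait in its internal queue.
with multiprocessing.Pool(4) as p:
    results = p.map(mp_worker, tasks)

# map preserves input order, so each result pairs with the task that produced it.
for (name, *_), (label, score) in zip(tasks, results):
    print(name, label, score)

If you would rather consume results as soon as each fit finishes instead of waiting for the whole batch, Pool.imap_unordered accepts the same task list.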