|
| 1 | +""" |
| 2 | +================================================== |
| 3 | +Column Transformer with Heterogeneous Data Sources |
| 4 | +================================================== |
| 5 | +
|
| 6 | +Datasets can often contain components of that require different feature |
| 7 | +extraction and processing pipelines. This scenario might occur when: |
| 8 | +
|
| 9 | +1. Your dataset consists of heterogeneous data types (e.g. raster images and |
| 10 | + text captions) |
| 11 | +2. Your dataset is stored in a Pandas DataFrame and different columns |
| 12 | + require different processing pipelines. |
| 13 | +
|
| 14 | +This example demonstrates how to use |
| 15 | +:class:`sklearn.compose.ColumnTransformer` on a dataset containing |
| 16 | +different types of features. We use the 20-newsgroups dataset and compute |
| 17 | +standard bag-of-words features for the subject line and body in separate |
| 18 | +pipelines as well as ad hoc features on the body. We combine them (with |
| 19 | +weights) using a ColumnTransformer and finally train a classifier on the |
| 20 | +combined set of features. |
| 21 | +
|
| 22 | +The choice of features is not particularly helpful, but serves to illustrate |
| 23 | +the technique. |
| 24 | +""" |
| 25 | + |
| 26 | +# Author: Matt Terry <matt.terry@gmail.com> |
| 27 | +# |
| 28 | +# License: BSD 3 clause |
| 29 | +from __future__importprint_function |
| 30 | + |
| 31 | +importnumpyasnp |
| 32 | + |
| 33 | +fromsklearn.baseimportBaseEstimator,TransformerMixin |
| 34 | +fromsklearn.datasetsimportfetch_20newsgroups |
| 35 | +fromsklearn.datasets.twenty_newsgroupsimportstrip_newsgroup_footer |
| 36 | +fromsklearn.datasets.twenty_newsgroupsimportstrip_newsgroup_quoting |
| 37 | +fromsklearn.decompositionimportTruncatedSVD |
| 38 | +fromsklearn.feature_extractionimportDictVectorizer |
| 39 | +fromsklearn.feature_extraction.textimportTfidfVectorizer |
| 40 | +fromsklearn.metricsimportclassification_report |
| 41 | +fromsklearn.pipelineimportPipeline |
| 42 | +fromsklearn.composeimportColumnTransformer |
| 43 | +fromsklearn.svmimportSVC |
| 44 | + |
| 45 | + |
class TextStats(BaseEstimator, TransformerMixin):
    """Extract features from each document for DictVectorizer.

    ``transform`` maps each text post to a dict of simple surface
    statistics (character length and a '.'-count sentence proxy) so a
    downstream :class:`DictVectorizer` can turn them into a feature matrix.
    """

    def fit(self, x, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, posts):
        # One dict of ad hoc numeric features per input document.
        return [{'length': len(text),
                 'num_sentences': text.count('.')}
                for text in posts]
| 56 | + |
| 57 | + |
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
    """Extract the subject & body from a usenet post in a single pass.

    Takes a sequence of strings and produces a 2-column numpy object
    array: column 0 holds the ``Subject:`` header value and column 1 the
    post body (with footer and quoting stripped).
    """

    def fit(self, x, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, posts):
        # construct object dtype array with two columns
        # first column = 'subject' and second column = 'body'
        features = np.empty(shape=(len(posts), 2), dtype=object)
        for i, text in enumerate(posts):
            # RFC-822-style messages separate headers from body with a
            # blank line; partition on the first one.
            headers, _, bod = text.partition('\n\n')
            bod = strip_newsgroup_footer(bod)
            bod = strip_newsgroup_quoting(bod)
            features[i, 1] = bod

            # Pull the first 'Subject:' header line; empty string if absent.
            prefix = 'Subject:'
            sub = ''
            for line in headers.split('\n'):
                if line.startswith(prefix):
                    sub = line[len(prefix):]
                    break
            features[i, 0] = sub

        return features
| 86 | + |
| 87 | + |
pipeline = Pipeline([
    # Extract the subject & body
    ('subjectbody', SubjectBodyExtractor()),

    # Use ColumnTransformer to combine the features from subject and body
    ('union', ColumnTransformer(
        [
            # Pulling features from the post's subject line (first column)
            ('subject', TfidfVectorizer(min_df=50), 0),

            # Pipeline for standard bag-of-words model for body (second column)
            ('body_bow', Pipeline([
                ('tfidf', TfidfVectorizer()),
                ('best', TruncatedSVD(n_components=50)),
            ]), 1),

            # Pipeline for pulling ad hoc features from post's body
            ('body_stats', Pipeline([
                ('stats', TextStats()),  # returns a list of dicts
                ('vect', DictVectorizer()),  # list of dicts -> feature matrix
            ]), 1),
        ],

        # weight components in ColumnTransformer
        transformer_weights={
            'subject': 0.8,
            'body_bow': 0.5,
            'body_stats': 1.0,
        }
    )),

    # Use a SVC classifier on the combined features
    ('svc', SVC(kernel='linear')),
])
| 122 | + |
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
                           subset='train',
                           categories=categories,
                           )
test = fetch_20newsgroups(random_state=1,
                          subset='test',
                          categories=categories,
                          )

pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
# classification_report's signature is (y_true, y_pred); passing the
# predictions first would transpose precision and recall in the report.
print(classification_report(test.target, y))