Info hash | 9cecadb5dad292a3121c018b161d5ef19c64540b |
Last mirror activity | 1252d,20:48:03 ago |
Size | 1.27MB (1,274,126 bytes) |
Added | 2015-11-29 06:43:35 |
Views | 1302 |
Hits | 1652 |
ID | 3063 |
Type | single |
Downloaded | 249 time(s) |
Uploaded by | ghost |
Filename | Extreme Learning Machine for Regression and Multiclass Classification.pdf |
Mirrors | 0 complete, 0 downloading = 0 mirror(s) total [Log in to see full list] |
Extreme Learning Machine for Regression and Multiclass Classification.pdf | 1.27MB |
Type: Paper
Tags:
Bibtex:
@article{6035797,
  author   = {Huang, Guang-Bin and Zhou, Hongming and Ding, Xiaojian and Zhang, Rui},
  journal  = {IEEE Transactions on Systems, Man, and Cybernetics, Part B: Cybernetics},
  title    = {Extreme Learning Machine for Regression and Multiclass Classification},
  year     = {2012},
  volume   = {42},
  number   = {2},
  pages    = {513--529},
  month    = apr,
  doi      = {10.1109/TSMCB.2011.2168604},
  issn     = {1083-4419},
  abstract = {Due to the simplicity of their implementations, least square support vector machine (LS-SVM) and proximal support vector machine (PSVM) have been widely used in binary classification applications. The conventional LS-SVM and PSVM cannot be used in regression and multiclass classification applications directly, although variants of LS-SVM and PSVM have been proposed to handle such cases. This paper shows that both LS-SVM and PSVM can be simplified further and a unified learning framework of LS-SVM, PSVM, and other regularization algorithms referred to extreme learning machine (ELM) can be built. ELM works for the ``generalized'' single-hidden-layer feedforward networks (SLFNs), but the hidden layer (or called feature mapping) in ELM need not be tuned. Such SLFNs include but are not limited to SVM, polynomial network, and the conventional feedforward neural networks. This paper shows the following: 1) ELM provides a unified learning platform with a widespread type of feature mappings and can be applied in regression and multiclass classification applications directly; 2) from the optimization method point of view, ELM has milder optimization constraints compared to LS-SVM and PSVM; 3) in theory, compared to ELM, LS-SVM and PSVM achieve suboptimal solutions and require higher computational complexity; and 4) in theory, ELM can approximate any target continuous function and classify any disjoint regions. As verified by the simulation results, ELM tends to have better scalability and achieve similar (for regression and binary class cases) or much better (for multiclass cases) generalization performance at much faster learning speed (up to thousands times) than traditional SVM and LS-SVM.},
  keywords = {computational complexity;feedforward neural nets;learning (artificial intelligence);least squares approximations;optimisation;pattern classification;polynomials;regression analysis;support vector machines;ELM;LS-SVM;PSVM;binary classification applications;computational complexity;conventional feedforward neural networks;extreme learning machine;feature mapping;generalized single-hidden-layer feedforward networks;least square support vector machine;multiclass classification;optimization method;polynomial network;proximal support vector machine;regression;regularization algorithms;Approximation methods;Feedforward neural networks;Kernel;Machine learning;Optimization;Support vector machines;Training;Extreme learning machine (ELM);feature mapping;kernel;least square support vector machine (LS-SVM);proximal support vector machine (PSVM);regularization network},
}