wekajava: a Weka and Java tutorial roundup

In this article:

Q: Looking for the Java source code of Weka's ID3 algorithm

Below is Id3.java as shipped with Weka (package weka.classifiers.trees):

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Id3.java
 * Copyright (C) 1999 University of Waikato, Hamilton, New Zealand
 *
 */

package weka.classifiers.trees;

import weka.classifiers.Classifier;
import weka.classifiers.Sourcable;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.NoSupportForMissingValuesException;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

import java.util.Enumeration;

/**
 <!-- globalinfo-start -->
 * Class for constructing an unpruned decision tree based on the ID3 algorithm. Can only deal with nominal attributes. No missing values allowed. Empty leaves may result in unclassified instances. For more information see: <br/>
 * <br/>
 * R. Quinlan (1986). Induction of decision trees. Machine Learning. 1(1):81-106.
 * <p/>
 <!-- globalinfo-end -->
 *
 <!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;article{Quinlan1986,
 *    author = {R. Quinlan},
 *    journal = {Machine Learning},
 *    number = {1},
 *    pages = {81-106},
 *    title = {Induction of decision trees},
 *    volume = {1},
 *    year = {1986}
 * }
 * </pre>
 * <p/>
 <!-- technical-bibtex-end -->
 *
 <!-- options-start -->
 * Valid options are: <p/>
 *
 * <pre> -D
 *  If set, classifier is run in debug mode and
 *  may output additional info to the console</pre>
 *
 <!-- options-end -->
 *
 * @author Eibe Frank (eibe@cs.waikato.ac.nz)
 * @version $Revision: 6404 $
 */

public class Id3
  extends Classifier
  implements TechnicalInformationHandler, Sourcable {

  /** for serialization */
  static final long serialVersionUID = -2693678647096322561L;

  /** The node's successors. */
  private Id3[] m_Successors;

  /** Attribute used for splitting. */
  private Attribute m_Attribute;

  /** Class value if node is leaf. */
  private double m_ClassValue;

  /** Class distribution if node is leaf. */
  private double[] m_Distribution;

  /** Class attribute of dataset. */
  private Attribute m_ClassAttribute;

  /**
   * Returns a string describing the classifier.
   * @return a description suitable for the GUI.
   */
  public String globalInfo() {
    return "Class for constructing an unpruned decision tree based on the ID3 "
      + "algorithm. Can only deal with nominal attributes. No missing values "
      + "allowed. Empty leaves may result in unclassified instances. For more "
      + "information see: \n\n"
      + getTechnicalInformation().toString();
  }

  /**
   * Returns an instance of a TechnicalInformation object, containing
   * detailed information about the technical background of this class,
   * e.g., paper reference or book this class is based on.
   *
   * @return the technical information about this class
   */
  public TechnicalInformation getTechnicalInformation() {
    TechnicalInformation result;

    result = new TechnicalInformation(Type.ARTICLE);
    result.setValue(Field.AUTHOR, "R. Quinlan");
    result.setValue(Field.YEAR, "1986");
    result.setValue(Field.TITLE, "Induction of decision trees");
    result.setValue(Field.JOURNAL, "Machine Learning");
    result.setValue(Field.VOLUME, "1");
    result.setValue(Field.NUMBER, "1");
    result.setValue(Field.PAGES, "81-106");

    return result;
  }

  /**
   * Returns default capabilities of the classifier.
   *
   * @return the capabilities of this classifier
   */
  public Capabilities getCapabilities() {
    Capabilities result = super.getCapabilities();
    result.disableAll();

    // attributes
    result.enable(Capability.NOMINAL_ATTRIBUTES);

    // class
    result.enable(Capability.NOMINAL_CLASS);
    result.enable(Capability.MISSING_CLASS_VALUES);

    // instances
    result.setMinimumNumberInstances(0);

    return result;
  }

  /**
   * Builds Id3 decision tree classifier.
   *
   * @param data the training data
   * @exception Exception if classifier can't be built successfully
   */
  public void buildClassifier(Instances data) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // remove instances with missing class
    data = new Instances(data);
    data.deleteWithMissingClass();

    makeTree(data);
  }

  /**
   * Method for building an Id3 tree.
   *
   * @param data the training data
   * @exception Exception if decision tree can't be built successfully
   */
  private void makeTree(Instances data) throws Exception {

    // Check if no instances have reached this node.
    if (data.numInstances() == 0) {
      m_Attribute = null;
      m_ClassValue = Instance.missingValue();
      m_Distribution = new double[data.numClasses()];
      return;
    }

    // Compute attribute with maximum information gain.
    double[] infoGains = new double[data.numAttributes()];
    Enumeration attEnum = data.enumerateAttributes();
    while (attEnum.hasMoreElements()) {
      Attribute att = (Attribute) attEnum.nextElement();
      infoGains[att.index()] = computeInfoGain(data, att);
    }
    m_Attribute = data.attribute(Utils.maxIndex(infoGains));

    // Make leaf if information gain is zero.
    // Otherwise create successors.
    if (Utils.eq(infoGains[m_Attribute.index()], 0)) {
      m_Attribute = null;
      m_Distribution = new double[data.numClasses()];
      Enumeration instEnum = data.enumerateInstances();
      while (instEnum.hasMoreElements()) {
        Instance inst = (Instance) instEnum.nextElement();
        m_Distribution[(int) inst.classValue()]++;
      }
      Utils.normalize(m_Distribution);
      m_ClassValue = Utils.maxIndex(m_Distribution);
      m_ClassAttribute = data.classAttribute();
    } else {
      Instances[] splitData = splitData(data, m_Attribute);
      m_Successors = new Id3[m_Attribute.numValues()];
      for (int j = 0; j < m_Attribute.numValues(); j++) {
        m_Successors[j] = new Id3();
        m_Successors[j].makeTree(splitData[j]);
      }
    }
  }

  /**
   * Classifies a given test instance using the decision tree.
   *
   * @param instance the instance to be classified
   * @return the classification
   * @throws NoSupportForMissingValuesException if instance has missing values
   */
  public double classifyInstance(Instance instance)
    throws NoSupportForMissingValuesException {

    if (instance.hasMissingValue()) {
      throw new NoSupportForMissingValuesException("Id3: no missing values, "
                                                   + "please.");
    }
    if (m_Attribute == null) {
      return m_ClassValue;
    } else {
      return m_Successors[(int) instance.value(m_Attribute)].
        classifyInstance(instance);
    }
  }

  /**
   * Computes class distribution for instance using decision tree.
   *
   * @param instance the instance for which distribution is to be computed
   * @return the class distribution for the given instance
   * @throws NoSupportForMissingValuesException if instance has missing values
   */
  public double[] distributionForInstance(Instance instance)
    throws NoSupportForMissingValuesException {

    if (instance.hasMissingValue()) {
      throw new NoSupportForMissingValuesException("Id3: no missing values, "
                                                   + "please.");
    }
    if (m_Attribute == null) {
      return m_Distribution;
    } else {
      return m_Successors[(int) instance.value(m_Attribute)].
        distributionForInstance(instance);
    }
  }

  /**
   * Prints the decision tree using the private toString method from below.
   *
   * @return a textual description of the classifier
   */
  public String toString() {

    if ((m_Distribution == null) && (m_Successors == null)) {
      return "Id3: No model built yet.";
    }
    return "Id3\n\n" + toString(0);
  }

  /**
   * Computes information gain for an attribute.
   *
   * @param data the data for which info gain is to be computed
   * @param att the attribute
   * @return the information gain for the given attribute and data
   * @throws Exception if computation fails
   */
  private double computeInfoGain(Instances data, Attribute att)
    throws Exception {

    double infoGain = computeEntropy(data);
    Instances[] splitData = splitData(data, att);
    for (int j = 0; j < att.numValues(); j++) {
      if (splitData[j].numInstances() > 0) {
        infoGain -= ((double) splitData[j].numInstances() /
                     (double) data.numInstances()) *
          computeEntropy(splitData[j]);
      }
    }
    return infoGain;
  }

  /**
   * Computes the entropy of a dataset.
   *
   * @param data the data for which entropy is to be computed
   * @return the entropy of the data's class distribution
   * @throws Exception if computation fails
   */
  private double computeEntropy(Instances data) throws Exception {

    double[] classCounts = new double[data.numClasses()];
    Enumeration instEnum = data.enumerateInstances();
    while (instEnum.hasMoreElements()) {
      Instance inst = (Instance) instEnum.nextElement();
      classCounts[(int) inst.classValue()]++;
    }
    double entropy = 0;
    for (int j = 0; j < data.numClasses(); j++) {
      if (classCounts[j] > 0) {
        entropy -= classCounts[j] * Utils.log2(classCounts[j]);
      }
    }
    // Uses the identity -sum((c/N) * log2(c/N)) =
    // log2(N) - (1/N) * sum(c * log2(c)) to avoid dividing each count.
    entropy /= (double) data.numInstances();
    return entropy + Utils.log2(data.numInstances());
  }

  /**
   * Splits a dataset according to the values of a nominal attribute.
   *
   * @param data the data which is to be split
   * @param att the attribute to be used for splitting
   * @return the sets of instances produced by the split
   */
  private Instances[] splitData(Instances data, Attribute att) {

    Instances[] splitData = new Instances[att.numValues()];
    for (int j = 0; j < att.numValues(); j++) {
      splitData[j] = new Instances(data, data.numInstances());
    }
    Enumeration instEnum = data.enumerateInstances();
    while (instEnum.hasMoreElements()) {
      Instance inst = (Instance) instEnum.nextElement();
      splitData[(int) inst.value(att)].add(inst);
    }
    for (int i = 0; i < splitData.length; i++) {
      splitData[i].compactify();
    }
    return splitData;
  }

  /**
   * Outputs a tree at a certain level.
   *
   * @param level the level at which the tree is to be printed
   * @return the tree as string at the given level
   */
  private String toString(int level) {

    StringBuffer text = new StringBuffer();

    if (m_Attribute == null) {
      if (Instance.isMissingValue(m_ClassValue)) {
        text.append(": null");
      } else {
        text.append(": " + m_ClassAttribute.value((int) m_ClassValue));
      }
    } else {
      for (int j = 0; j < m_Attribute.numValues(); j++) {
        text.append("\n");
        for (int i = 0; i < level; i++) {
          text.append("|  ");
        }
        text.append(m_Attribute.name() + " = " + m_Attribute.value(j));
        text.append(m_Successors[j].toString(level + 1));
      }
    }
    return text.toString();
  }

  /**
   * Adds this tree recursively to the buffer.
   *
   * @param id the unique id for the method
   * @param buffer the buffer to add the source code to
   * @return the last ID being used
   * @throws Exception if something goes wrong
   */
  protected int toSource(int id, StringBuffer buffer) throws Exception {
    int result;
    int i;
    int newID;
    StringBuffer[] subBuffers;

    buffer.append("\n");
    buffer.append("  protected static double node" + id + "(Object[] i) {\n");

    // leaf?
    if (m_Attribute == null) {
      result = id;
      if (Double.isNaN(m_ClassValue)) {
        buffer.append("    return Double.NaN;");
      } else {
        buffer.append("    return " + m_ClassValue + ";");
      }
      if (m_ClassAttribute != null) {
        buffer.append(" // " + m_ClassAttribute.value((int) m_ClassValue));
      }
      buffer.append("\n");
      buffer.append("  }\n");
    } else {
      buffer.append("    checkMissing(i, " + m_Attribute.index() + ");\n\n");
      buffer.append("    // " + m_Attribute.name() + "\n");

      // subtree calls
      subBuffers = new StringBuffer[m_Attribute.numValues()];
      newID = id;
      for (i = 0; i < m_Attribute.numValues(); i++) {
        newID++;
        buffer.append("    ");
        if (i > 0) {
          buffer.append("else ");
        }
        buffer.append("if (((String) i[" + m_Attribute.index()
            + "]).equals(\"" + m_Attribute.value(i) + "\"))\n");
        buffer.append("      return node" + newID + "(i);\n");
        subBuffers[i] = new StringBuffer();
        newID = m_Successors[i].toSource(newID, subBuffers[i]);
      }
      buffer.append("    else\n");
      buffer.append("      throw new IllegalArgumentException(\"Value '\" + i["
          + m_Attribute.index() + "] + \"' is not allowed!\");\n");
      buffer.append("  }\n");

      // output subtree code
      for (i = 0; i < m_Attribute.numValues(); i++) {
        buffer.append(subBuffers[i].toString());
      }
      subBuffers = null;

      result = newID;
    }

    return result;
  }

  /**
   * Returns a string that describes the classifier as source. The
   * classifier will be contained in a class with the given name (there may
   * be auxiliary classes),
   * and will contain a method with the signature:
   * <pre><code>
   * public static double classify(Object[] i);
   * </code></pre>
   * where the array <code>i</code> contains elements that are either
   * Double, String, with missing values represented as null. The generated
   * code is public domain and comes with no warranty. <br/>
   * Note: works only if class attribute is the last attribute in the dataset.
   *
   * @param className the name that should be given to the source class.
   * @return the object source described by a string
   * @throws Exception if the source can't be computed
   */
  public String toSource(String className) throws Exception {
    StringBuffer result;
    int id;

    result = new StringBuffer();

    result.append("class " + className + " {\n");
    result.append("  private static void checkMissing(Object[] i, int index) {\n");
    result.append("    if (i[index] == null)\n");
    result.append("      throw new IllegalArgumentException(\"Null values "
        + "are not allowed!\");\n");
    result.append("  }\n\n");
    result.append("  public static double classify(Object[] i) {\n");
    id = 0;
    result.append("    return node" + id + "(i);\n");
    result.append("  }\n");
    toSource(id, result);
    result.append("}\n");

    return result.toString();
  }

  /**
   * Returns the revision string.
   *
   * @return the revision
   */
  public String getRevision() {
    return RevisionUtils.extract("$Revision: 6404 $");
  }

  /**
   * Main method.
   *
   * @param args the options for the classifier
   */
  public static void main(String[] args) {
    runClassifier(new Id3(), args);
  }
}
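
For readers who want to try the class above outside the Weka GUI, here is a minimal usage sketch; it is not part of the Weka source. It assumes a nominal-only ARFF file at the hypothetical path weather.nominal.arff, with the class as the last attribute.

import weka.classifiers.trees.Id3;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class Id3Demo {
  public static void main(String[] args) throws Exception {
    // Load a nominal-only dataset; ID3 cannot handle numeric attributes
    // or missing values. The file path is a placeholder.
    Instances data = DataSource.read("weather.nominal.arff");
    data.setClassIndex(data.numAttributes() - 1);

    // Train the tree and print its textual representation.
    Id3 tree = new Id3();
    tree.buildClassifier(data);
    System.out.println(tree);

    // Classify the first training instance and map the numeric
    // prediction back to the class label.
    Instance first = data.instance(0);
    double pred = tree.classifyInstance(first);
    System.out.println("predicted: " + data.classAttribute().value((int) pred));
  }
}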

Q: Is it better to learn data mining with Python, or to learn Weka development with Java?

If you are comfortable with Python, use Python. If you are comfortable with Java, learn Weka: it has been in continuous development for more than 20 years, is extremely mature and polished, automates much of the workflow, and its extended versions can even choose algorithms for you, to the point that someone with no background at all can use it.

Weka does puncture the authority and mystique of people who like to seem profound. Many people insist on reinventing the wheel: they would rather go into battle with a dagger, even with a loaded AK-47 lying right beside them. That said, if you want to build complex applications, knowing Java is a must. Also, Python can drive Weka as well (through Jython, if I remember correctly, which lets Python-syntax scripts call Weka's Java classes).

Q: Beida Jade Bird (北大青鸟) Java training: the eight best open-source data mining tools

Data mining, also known as data exploration or data harvesting, is a step in Knowledge Discovery in Databases (KDD): the process of mining and analyzing large amounts of data and extracting information from it. Applications include market segmentation, such as identifying the characteristics of customers who buy a particular product from a particular brand, and fraud detection, such as identifying transaction patterns that may indicate online fraud. In this article, Guiyang Computer Training has compiled the eight best open-source tools for data mining.

1. Weka — Weka is an open data mining workbench that brings together a large number of machine learning algorithms for data mining tasks, covering data preprocessing, classification, regression, clustering, association rules, and visualization in an interactive interface (a minimal evaluation sketch follows this list).

2. RapidMiner — RapidMiner is a world-leading data mining solution built on highly advanced technology. It covers a wide range of data mining tasks and techniques, and simplifies the design and evaluation of data mining processes.

3. Orange — Orange is a component-based data mining and machine learning software suite. It is both friendly and powerful, with a fast, versatile visual-programming front end for exploratory data analysis and visualization, and Python bindings for scripting. It includes a complete set of components for data preprocessing and provides features for data accounting, transformation, modeling, pattern evaluation, and exploration. It is written in C++ and Python, with a graphics library built on the cross-platform Qt framework.

4. KNIME — KNIME (Konstanz Information Miner) is a user-friendly, intelligent, and feature-rich open-source platform for data integration, data processing, data analysis, and data exploration.

5. jHepWork — jHepWork is a full-featured, object-oriented framework for scientific data analysis. Jython macros are used to display one- and two-dimensional histograms of the data. The package includes many tools for interacting with two- and three-dimensional scientific plots.

6. Apache Mahout — Apache Mahout is a project of the Apache Software Foundation (ASF) whose primary goal is to create scalable machine learning algorithms that developers can use free of charge under the Apache license. The project is in its second year and so far has one public release. Mahout contains many implementations, including clustering, classification, collaborative filtering (CF), and evolutionary programming. Moreover, by building on the Apache Hadoop library, Mahout scales effectively into the cloud.

7. ELKI — ELKI (Environment for Developing KDD-Applications Supported by Index-Structures) is used mainly for clustering and, in particular, for finding outliers. It is a data mining platform similar to Weka, written in Java and equipped with a GUI.
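
As a concrete illustration of the Weka entry above, here is a minimal evaluation sketch, not taken from the article: the dataset path iris.arff is a placeholder, and J48 (Weka's C4.5 implementation) stands in for whichever classifier you want to evaluate with 10-fold cross-validation.

import java.util.Random;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class WekaCrossValidation {
  public static void main(String[] args) throws Exception {
    // Load the dataset; the path is a placeholder.
    Instances data = DataSource.read("iris.arff");
    // By convention the class is the last attribute.
    data.setClassIndex(data.numAttributes() - 1);

    // Evaluate a C4.5-style tree (J48) with 10-fold cross-validation.
    J48 classifier = new J48();
    Evaluation eval = new Evaluation(data);
    eval.crossValidateModel(classifier, data, 10, new Random(1));
    System.out.println(eval.toSummaryString());
  }
}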
