Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Where can I find these dax and xml files? Please advise. #24

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
316 changes: 316 additions & 0 deletions examples/org/workflowsim/examples/WorkflowSimBasicExample2.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,316 @@
/**
* Copyright 2012-2013 University Of Southern California
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.workflowsim.examples;

import java.io.File;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.LinkedList;
import java.util.List;
import org.cloudbus.cloudsim.Cloudlet;
import org.cloudbus.cloudsim.CloudletSchedulerSpaceShared;
import org.cloudbus.cloudsim.DatacenterCharacteristics;
import org.cloudbus.cloudsim.Host;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.Storage;
import org.cloudbus.cloudsim.VmAllocationPolicySimple;
import org.cloudbus.cloudsim.VmSchedulerTimeShared;
import org.cloudbus.cloudsim.core.CloudSim;
import org.cloudbus.cloudsim.provisioners.BwProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.PeProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.RamProvisionerSimple;
import org.workflowsim.ClusterStorage;
import org.workflowsim.CondorVM;
import org.workflowsim.DatacenterExtended;
import org.workflowsim.Job;
import org.workflowsim.WorkflowEngine;
import org.workflowsim.WorkflowPlanner;
import org.workflowsim.failure.FailureGenerator;
import org.workflowsim.failure.FailureMonitor;
import org.workflowsim.utils.ClusteringParameters;
import org.workflowsim.utils.OverheadParameters;
import org.workflowsim.utils.Parameters;
import org.workflowsim.utils.ReplicaCatalog;

/**
 * This WorkflowSimExample creates a workflow planner, a workflow engine,
 * one scheduler, one data center, and 20 VMs. You should change daxPath at
 * least. You may change other parameters as well.
 *
 * @author Weiwei Chen
 * @since WorkflowSim Toolkit 1.0
 * @date Apr 9, 2013
 */
public class WorkflowSimBasicExample2 {

    /**
     * Creates a list of CondorVMs to be submitted to a scheduler.
     *
     * @param userId the id of the scheduler that will own these vms
     * @param vms the number of vms to create
     * @return the list of created vms
     */
    private static List<CondorVM> createVM(int userId, int vms) {

        // Container to store VMs. This list is passed to the broker later.
        LinkedList<CondorVM> list = new LinkedList<CondorVM>();

        // VM parameters.
        long size = 10000; // image size (MB)
        int ram = 512; // vm memory (MB)
        int mips = 1000;
        long bw = 1000;
        int pesNumber = 1; // number of cpus
        String vmm = "Xen"; // VMM name

        // Create the VMs; each vm's id is its index. (The original kept an
        // extra CondorVM[] array and a constant ratio of 1.0 — both removed.)
        for (int i = 0; i < vms; i++) {
            list.add(new CondorVM(i, userId, mips, pesNumber, ram, bw, size, vmm,
                    new CloudletSchedulerSpaceShared()));
        }

        return list;
    }

    ////////////////////////// STATIC METHODS ///////////////////////
    /**
     * Creates main() to run this example.
     * This example has only one datacenter and one storage.
     *
     * @param args command line arguments (not used)
     */
    public static void main(String[] args) {

        try {
            // First step: Initialize the WorkflowSim package.

            /*
             * However, the exact number of vms may not necessarily be vmNum.
             * If the data center or the host doesn't have sufficient
             * resources the exact vmNum would be smaller than that. Take care.
             */
            int vmNum = 20; // number of vms

            /*
             * Should change this based on the real physical path.
             */
            String daxPath = "/Users/chenweiwei/Work/WorkflowSim-1.0/config/dax/Montage_100.xml";
            // NOTE: the original also checked daxPath == null, which is dead
            // code for a non-null literal; the file-existence check below is
            // the meaningful guard.
            File daxFile = new File(daxPath);
            if (!daxFile.exists()) {
                Log.printLine("Warning: Please replace daxPath with the physical path in your working environment!");
                return;
            }

            /*
             * Use default fault tolerant parameters (no monitoring, no
             * generated failures).
             */
            Parameters.FTCMonitor ftc_monitor = Parameters.FTCMonitor.MONITOR_NONE;
            Parameters.FTCFailure ftc_failure = Parameters.FTCFailure.FAILURE_NONE;
            Parameters.FTCluteringAlgorithm ftc_method = null;

            /*
             * Since we are using the MINMIN scheduling algorithm, the planning
             * algorithm should be INVALID such that the planner would not
             * override the result of the scheduler.
             */
            Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
            Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
            ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

            // No overheads.
            OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

            // No clustering.
            ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
            ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

            // Initialize static parameters.
            Parameters.init(ftc_method, ftc_monitor, ftc_failure,
                    null, vmNum, daxPath, null,
                    null, op, cp, sch_method, pln_method,
                    null, 0);
            ReplicaCatalog.init(file_system);

            FailureMonitor.init();
            FailureGenerator.init();

            // Initialize the CloudSim library before creating any entities.
            int num_user = 1; // number of grid users
            Calendar calendar = Calendar.getInstance();
            boolean trace_flag = false; // means trace events

            CloudSim.init(num_user, calendar, trace_flag);

            DatacenterExtended datacenter0 = createDatacenter("Datacenter_0");

            // Create a WorkflowPlanner with one scheduler.
            WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);

            // Create a WorkflowEngine.
            WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();

            /*
             * Create a list of VMs. The userId of a vm is basically the id of
             * the scheduler that controls this vm.
             */
            List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

            // Submits this list of vms to this WorkflowEngine.
            wfEngine.submitVmList(vmlist0, 0);

            // Binds the data center with the scheduler.
            wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

            CloudSim.startSimulation();

            List<Job> outputList0 = wfEngine.getJobsReceivedList();

            CloudSim.stopSimulation();

            printJobList(outputList0);

        } catch (Exception e) {
            // Report the cause instead of silently discarding it (the
            // original dropped the exception entirely).
            Log.printLine("The simulation has been terminated due to an unexpected error");
            Log.printLine(e.getMessage());
        }
    }

    /**
     * Creates a datacenter with 20 dual-core hosts and one cluster storage.
     *
     * @param name the name of the datacenter
     * @return the created datacenter, or null if creation failed
     */
    private static DatacenterExtended createDatacenter(String name) {

        // Here are the steps needed to create a PowerDatacenter:
        // 1. We need to create a list to store one or more Machines.
        List<Host> hostList = new ArrayList<Host>();

        // FIX: hostId must be declared outside the loop. The original
        // re-declared it (= 0) on every iteration, so hostId++ had no effect
        // and all 20 hosts were created with id 0.
        int hostId = 0;

        for (int i = 1; i <= 20; i++) {
            // 2. A Machine contains one or more PEs or CPUs/Cores; create a
            // fresh PE list for each host.
            List<Pe> peList1 = new ArrayList<Pe>();
            int mips = 2000;
            // 3. Create PEs and add these into the list
            // (need to store Pe id and MIPS Rating).
            peList1.add(new Pe(0, new PeProvisionerSimple(mips)));
            peList1.add(new Pe(1, new PeProvisionerSimple(mips)));

            int ram = 2048; // host memory (MB)
            long storage = 1000000; // host storage
            int bw = 10000;
            hostList.add(
                    new Host(
                    hostId,
                    new RamProvisionerSimple(ram),
                    new BwProvisionerSimple(bw),
                    storage,
                    peList1,
                    new VmSchedulerTimeShared(peList1)));
            hostId++;
        }

        // 5. Create a DatacenterCharacteristics object that stores the
        // properties of a data center: architecture, OS, list of
        // Machines, allocation policy: time- or space-shared, time zone
        // and its price (G$/Pe time unit).
        String arch = "x86"; // system architecture
        String os = "Linux"; // operating system
        String vmm = "Xen";
        double time_zone = 10.0; // time zone this resource located
        double cost = 3.0; // the cost of using processing in this resource
        double costPerMem = 0.05; // the cost of using memory in this resource
        double costPerStorage = 0.1; // the cost of using storage in this resource
        double costPerBw = 0.1; // the cost of using bw in this resource
        LinkedList<Storage> storageList = new LinkedList<Storage>(); // we are not adding SAN devices by now
        DatacenterExtended datacenter = null;

        DatacenterCharacteristics characteristics = new DatacenterCharacteristics(
                arch, os, vmm, hostList, time_zone, cost, costPerMem, costPerStorage, costPerBw);

        // 6. Finally, we need to create a cluster storage object.
        /*
         * The bandwidth within a data center; the number comes from the
         * futuregrid site, you can specify your bw.
         */
        double intraBandwidth = 1.5e7;

        try {
            ClusterStorage s1 = new ClusterStorage(name, 1e12);

            // The bandwidth within a data center.
            s1.setBandwidth("local", intraBandwidth);
            // The bandwidth to the source site.
            s1.setBandwidth("source", intraBandwidth);
            storageList.add(s1);
            datacenter = new DatacenterExtended(name, characteristics, new VmAllocationPolicySimple(hostList), storageList, 0);
        } catch (Exception e) {
            // FIX: the original swallowed this exception silently, returning
            // null with no diagnostic.
            Log.printLine("Failed to create datacenter " + name + ": " + e.getMessage());
        }

        return datacenter;
    }

    /**
     * Prints the job objects.
     *
     * @param list list of jobs
     */
    private static void printJobList(List<Job> list) {
        String indent = " ";
        Log.printLine();
        Log.printLine("========== OUTPUT ==========");
        Log.printLine("Cloudlet ID" + indent + "STATUS" + indent
                + "Data center ID" + indent + "VM ID" + indent + indent + "Time" + indent + "Start Time" + indent + "Finish Time" + indent + "Depth");

        DecimalFormat dft = new DecimalFormat("###.##");
        // Enhanced-for replaces the original index loop; only SUCCESS and
        // FAILED jobs are printed (other statuses get only the id column).
        for (Job job : list) {
            Log.print(indent + job.getCloudletId() + indent + indent);

            if (job.getCloudletStatus() == Cloudlet.SUCCESS) {
                Log.print("SUCCESS");

                Log.printLine(indent + indent + job.getResourceId() + indent + indent + indent + job.getVmId()
                        + indent + indent + indent + dft.format(job.getActualCPUTime())
                        + indent + indent + dft.format(job.getExecStartTime()) + indent + indent + indent
                        + dft.format(job.getFinishTime()) + indent + indent + indent + job.getDepth() + indent + job.getProcessingCost());
            } else if (job.getCloudletStatus() == Cloudlet.FAILED) {
                Log.print("FAILED");

                Log.printLine(indent + indent + job.getResourceId() + indent + indent + indent + job.getVmId()
                        + indent + indent + indent + dft.format(job.getActualCPUTime())
                        + indent + indent + dft.format(job.getExecStartTime()) + indent + indent + indent
                        + dft.format(job.getFinishTime()) + indent + indent + indent + job.getDepth());
            }
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -98,11 +98,12 @@ public static void main(String[] args) {
* delete in the future
*
*/
String code = "i";
String daxPath = "/Users/chenweiwei/Research/balanced_clustering/generator/BharathiPaper/Fake_1.xml";
double intraBandwidth = 1.5e5;
double c_delay = 0, q_delay = 0, e_delay = 0, p_delay = 0;
int interval = 0;
String code = "n";
String daxPath = "/Users/chenweiwei/NetBeansProjects/BhWorkflowGen/Inspiral_8000.xml";
double intraBandwidth = 1.5e7;
double c_delay = 0, q_delay = 100, e_delay = 100, p_delay = 0;
int interval = 5;
int vmNum = 200;//number of vms;

for(int i = 0; i < args.length; i ++){
char key = args[i].charAt(1);
Expand Down Expand Up @@ -131,6 +132,9 @@ public static void main(String[] args) {
case 'i':
interval = Integer.parseInt(args[++i]);
break;
case 'v':
vmNum = Integer.parseInt(args[++i]);
break;
}
}
// First step: Initialize the WorkflowSim package.
Expand All @@ -140,7 +144,7 @@ public static void main(String[] args) {
* the data center or the host doesn't have sufficient resources the
* exact vmNum would be smaller than that. Take care.
*/
int vmNum = 20;//number of vms;

/**
* Should change this based on real physical path
*/
Expand Down Expand Up @@ -206,7 +210,7 @@ public static void main(String[] args) {
* i: Horizontal Impact Factor Balancing (HIFB)
* h: Horizontal Random Balancing , the original horizontal clustering
*/
ClusteringParameters cp = new ClusteringParameters(20, 0, method, code);
ClusteringParameters cp = new ClusteringParameters(vmNum, 0, method, code);


/**
Expand All @@ -229,7 +233,7 @@ public static void main(String[] args) {
// Initialize the CloudSim library
CloudSim.init(num_user, calendar, trace_flag);

DatacenterExtended datacenter0 = createDatacenter("Datacenter_0", intraBandwidth);
DatacenterExtended datacenter0 = createDatacenter("Datacenter_0", intraBandwidth, vmNum);

/**
* Create a WorkflowPlanner with one schedulers.
Expand Down Expand Up @@ -270,12 +274,11 @@ public static void main(String[] args) {
}
}

private static DatacenterExtended createDatacenter(String name , double intraBandwidth) {
private static DatacenterExtended createDatacenter(String name , double intraBandwidth, int vmNumber) {

// Here are the steps needed to create a PowerDatacenter:
// 1. We need to create a list to store one or more
// Machines
int vmNumber = 20;
List<Host> hostList = new ArrayList<Host>();

// 2. A Machine contains one or more PEs or CPUs/Cores. Therefore, should
Expand All @@ -290,8 +293,8 @@ private static DatacenterExtended createDatacenter(String name , double intraBan
peList1.add(new Pe(1, new PeProvisionerSimple(mips)));

int hostId = 0;
int ram = 2048; //host memory (MB)
long storage = 1000000; //host storage
int ram = 2048 * vmNumber / 20; //host memory (MB)
long storage = 1000000 * vmNumber / 20; //host storage
int bw = 10000;
hostList.add(
new Host(
Expand Down
Loading