Errors fixed in calculating time to transfer a replica #10

Open · wants to merge 1 commit into base: replication
@@ -65,7 +65,7 @@ protected static List<CondorVM> createVM(int userId, int vms) {
Random bwRandom = new Random(System.currentTimeMillis());

for (int i = 0; i < vms; i++) {
- double ratio = bwRandom.nextDouble();
+ double ratio = 1;
vm[i] = new CondorVM(i, userId, mips * ratio, pesNumber, ram, (long) (bw * ratio), size, vmm, new CloudletSchedulerSpaceShared());
list.add(vm[i]);
}
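Fixing ratio at 1 makes the VM pool homogeneous: every VM now receives the full mips and bw rather than a random fraction of each, which removes run-to-run variation in VM capacity (the now-unused bwRandom is left in place).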
@@ -37,109 +37,118 @@
* @author Weiwei Chen
* @since WorkflowSim Toolkit 1.1
* @date Nov 9, 2013
*
*
*/

public class DataAwareSchedulingAlgorithmExample2 extends DHEFTPlanningAlgorithmExample1 {

////////////////////////// STATIC METHODS ///////////////////////
/**
* Creates runSim(), which main() invokes repeatedly. This example has
* only one datacenter and one storage.
*/

public static void runSim(){
try {
// First step: Initialize the WorkflowSim package.

/**
* Note that the actual number of VMs created may be smaller than vmNum
* if the data center or the host does not have sufficient resources.
*/
int vmNum = 20; // number of vms
/**
* Should change this based on real physical path
*/
String daxPath = "/Users/Hassan/git/WorkflowSim-1.0/config/dax/Montage_100.xml";

File daxFile = new File(daxPath);
if (!daxFile.exists()) {
Log.printLine("Warning: Please replace daxPath with the physical path in your working environment!");
return;
}

/**
* This example uses the data-aware (DATA) scheduling algorithm with no
* planning algorithm, so task placement is decided entirely by the
* scheduler at runtime.
*/
Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.DATA;
Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.RANDOM;

/**
* No overheads
*/
OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

/**
* No Clustering
*/
ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

/**
* Initialize static parameters
*/
Parameters.init(vmNum, daxPath, null,
null, op, cp, sch_method, pln_method,
null, 0);
ReplicaCatalog.init(file_system);

// before creating any entities.
int num_user = 1; // number of grid users
Calendar calendar = Calendar.getInstance();
boolean trace_flag = false; // whether to trace events

// Initialize the CloudSim library
CloudSim.init(num_user, calendar, trace_flag);

WorkflowDatacenter datacenter0 = createDatacenter("Datacenter_0");

/**
* Create a WorkflowPlanner with one scheduler.
*/
WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
/**
* Create a WorkflowEngine.
*/
WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
/**
* Create a list of VMs. The userId of a VM is the id of the
* scheduler that controls it.
*/
List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

/**
* Submits this list of vms to this WorkflowEngine.
*/
wfEngine.submitVmList(vmlist0, 0);

/**
* Binds the data centers with the scheduler.
*/
wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);
CloudSim.startSimulation();

List<Job> outputList0 = wfEngine.getJobsReceivedList();

CloudSim.stopSimulation();

printJobList(outputList0);

} catch (Exception e) {
Log.printLine("The simulation has been terminated due to an unexpected error");
}
}
public static void main(String[] args) {


try {
// First step: Initialize the WorkflowSim package.

/**
* Note that the actual number of VMs created may be smaller than vmNum
* if the data center or the host does not have sufficient resources.
*/
int vmNum = 5; // number of vms
/**
* Should change this based on real physical path
*/
String daxPath = "/Users/chenweiwei/Work/WorkflowSim-1.0/config/dax/Montage_100.xml";

File daxFile = new File(daxPath);
if (!daxFile.exists()) {
Log.printLine("Warning: Please replace daxPath with the physical path in your working environment!");
return;
}

/**
* This example uses the data-aware (DATA) scheduling algorithm with no
* planning algorithm, so task placement is decided entirely by the
* scheduler at runtime.
*/
Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.DATA;
Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.RANDOM;

/**
* No overheads
*/
OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

/**
* No Clustering
*/
ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

/**
* Initialize static parameters
*/
Parameters.init(vmNum, daxPath, null,
null, op, cp, sch_method, pln_method,
null, 0);
ReplicaCatalog.init(file_system);

// before creating any entities.
int num_user = 1; // number of grid users
Calendar calendar = Calendar.getInstance();
boolean trace_flag = false; // whether to trace events

// Initialize the CloudSim library
CloudSim.init(num_user, calendar, trace_flag);

WorkflowDatacenter datacenter0 = createDatacenter("Datacenter_0");

/**
* Create a WorkflowPlanner with one scheduler.
*/
WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
/**
* Create a WorkflowEngine.
*/
WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
/**
* Create a list of VMs. The userId of a VM is the id of the
* scheduler that controls it.
*/
List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

/**
* Submits this list of vms to this WorkflowEngine.
*/
wfEngine.submitVmList(vmlist0, 0);

/**
* Binds the data centers with the scheduler.
*/
wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

CloudSim.startSimulation();

List<Job> outputList0 = wfEngine.getJobsReceivedList();

CloudSim.stopSimulation();

printJobList(outputList0);

} catch (Exception e) {
Log.printLine("The simulation has been terminated due to an unexpected error");
}
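// Repeat the full simulation 50 times via runSim(); note that runSim()
// uses its own vmNum (20) and daxPath, separate from the run above.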
for (int i = 0; i < 50; i++) {
runSim();
}

}
}


25 changes: 18 additions & 7 deletions sources/org/workflowsim/WorkflowDatacenter.java
@@ -15,6 +15,8 @@
*/
package org.workflowsim;

+ import java.util.ArrayList;
+ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
@@ -429,7 +431,7 @@ private double calculateDataTransferDelay(File file, int userId, int vmId, Vm vm
}
}
if (requiredFileStagein && maxBwth > 0.0) {
- time = file.getSize() / Consts.MILLION * 8 / maxBwth;
+ time = (file.getSize() / (double) (Consts.MILLION * 8)) / maxBwth;
}
return time;
}
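The old expression evaluates left to right with integer arithmetic: `file.getSize() / Consts.MILLION` truncates toward zero before anything is cast, so any file smaller than one million size units reports a transfer delay of 0. The new grouping forces floating-point division; note that it also moves the factor of 8 into the divisor, which changes the computed value for all file sizes, not only small ones. A minimal standalone sketch of the difference (the size, bandwidth, and units here are illustrative assumptions, not values from WorkflowSim):

```java
public class TransferTimeDemo {
    static final int MILLION = 1_000_000; // stand-in for Consts.MILLION

    public static void main(String[] args) {
        long size = 900_000;   // a file smaller than one million units
        double maxBwth = 15.0; // best available bandwidth

        // Old: long division runs first, so 900_000 / 1_000_000 == 0
        double before = size / MILLION * 8 / maxBwth;

        // New: the cast makes the division floating-point, so small
        // files no longer get a zero transfer delay
        double after = (size / (double) (MILLION * 8)) / maxBwth;

        System.out.println(before + " vs " + after); // 0.0 vs 0.0075
    }
}
```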
@@ -462,20 +464,29 @@ private void register(Cloudlet cl) {
break;
case RANDOM:
ReplicaCatalog.addStorageList(file.getName(), Integer.toString(vmId));
- Random random = new Random(System.currentTimeMillis());
- double factor = 0.1;
- int vm2copy = (int) ((double) Parameters.getVmNum() * factor);
- for (int i = 0; i < vm2copy; i++) {
-     int destination = (int) (random.nextDouble() * (double) Parameters.getVmNum());
+ double factor = 1.0;
+ List<Integer> randoms = new ArrayList<Integer>();
+ for (int i = 0; i < Parameters.getVmNum(); i++) {
+     randoms.add(i);
+ }
+ Collections.shuffle(randoms);
+ for (int i = 0; i < (int) (factor * Parameters.getVmNum()); i++) {
+     int destination = randoms.get(i);
FileStageOutMessage message = new FileStageOutMessage(destination, vmId, file.getName());
double delay = calculateDataTransferDelay(file, userId, vmId, vm);
- send(userId, delay, WorkflowSimTags.FILE_STAGE_OUT, message);
+ send(this.getId(), delay, WorkflowSimTags.FILE_STAGE_OUT, message);
}
break;
}
}
}
}


private class FileStageOutMessage {

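Two behavioral changes here: destinations are now drawn without replacement (the old independent draws could stage the same file to one VM twice), and with factor raised to 1.0 every VM receives a copy, turning partial replication into full replication. The stage-out event is also now sent to this.getId() (the datacenter) rather than to userId. A standalone sketch of the shuffle-based selection (names and values are illustrative, not part of the PR):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class StageOutTargets {
    // Take a prefix of a shuffled index list: every destination is
    // distinct, unlike repeated independent random draws.
    static List<Integer> pickDestinations(int vmNum, double factor) {
        List<Integer> ids = new ArrayList<Integer>();
        for (int i = 0; i < vmNum; i++) {
            ids.add(i);
        }
        Collections.shuffle(ids);
        return ids.subList(0, (int) (factor * vmNum));
    }

    public static void main(String[] args) {
        // With factor 1.0 this is simply all 20 VM ids in random order.
        System.out.println(pickDestinations(20, 1.0));
    }
}
```

As in the PR's loop, the shuffled list still contains the source VM itself, so a copy may be staged out to the VM that already holds the file.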
12 changes: 6 additions & 6 deletions sources/org/workflowsim/planning/HEFTPlanningAlgorithm.java
@@ -70,11 +70,11 @@ public int compareTo(TaskRank o) {
}

public HEFTPlanningAlgorithm() {
- computationCosts = new HashMap<>();
- transferCosts = new HashMap<>();
- rank = new HashMap<>();
- earliestFinishTimes = new HashMap<>();
- schedules = new HashMap<>();
+ computationCosts = new HashMap<Task, Map<CondorVM, Double>>();
+ transferCosts = new HashMap<Task, Map<Task, Double>>();
+ rank = new HashMap<Task, Double>();
+ earliestFinishTimes = new HashMap<Task, Double>();
+ schedules = new HashMap<CondorVM, List<Event>>();
}

/**
@@ -246,7 +246,7 @@ private double calculateRank(Task task) {
* Allocates all tasks to be scheduled in non-ascending order of schedule.
*/
private void allocateTasks() {
- List<TaskRank> taskRank = new ArrayList<>();
+ List<TaskRank> taskRank = new ArrayList<TaskRank>();
for (Task task : rank.keySet()) {
taskRank.add(new TaskRank(task, rank.get(task)));
}
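The HEFTPlanningAlgorithm changes spell out the type arguments that the Java 7 diamond operator would infer; both forms are equivalent on Java 7+, but only the explicit form compiles on Java 6, which appears to be the motivation. A sketch of the two forms (not part of the PR):

```java
import java.util.HashMap;
import java.util.Map;

public class GenericsDemo {
    public static void main(String[] args) {
        // Java 7+ diamond: type arguments inferred from the declaration.
        Map<String, Double> withDiamond = new HashMap<>();
        // Java 6-compatible form used throughout this PR.
        Map<String, Double> explicit = new HashMap<String, Double>();
        withDiamond.put("makespan", 42.0);
        explicit.putAll(withDiamond);
        System.out.println(explicit); // {makespan=42.0}
    }
}
```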