/**
 * Prepares this host to receive an incoming VM migration: verifies that every
 * network used by the VM's nics is reachable from this host, then marks the VM
 * as Migrating on the hypervisor.
 *
 * @param cmd carries the spec of the VM about to be migrated in
 * @return a success answer, or a failure answer wrapping the Ovm3 error
 */
public PrepareForMigrationAnswer execute(PrepareForMigrationCommand cmd) {
    final VirtualMachineTO vmTo = cmd.getVirtualMachine();
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Preparing host for migrating " + vmTo.getName());
    }
    final NicTO[] vmNics = vmTo.getNics();
    try {
        // Ensure each of the VM's networks exists on this host before migration.
        for (final NicTO nic : vmNics) {
            network.getNetwork(nic);
        }
        hypervisor.setVmState(vmTo.getName(), State.Migrating);
        LOGGER.debug("VM " + vmTo.getName() + " is in Migrating state");
        return new PrepareForMigrationAnswer(cmd);
    } catch (Ovm3ResourceException e) {
        LOGGER.error("Catch Exception " + e.getClass().getName() + " prepare for migration failed due to: " + e.getMessage());
        return new PrepareForMigrationAnswer(cmd, e);
    }
}
/**
 * Creates vifs for every nic declared in the VM spec.
 *
 * @return false when the spec declares no nics (logged at info level),
 *         otherwise the result of the nic-array overload
 * @throws Ovm3ResourceException propagated from the nic-array overload
 */
public Boolean createVifs(Xen.Vm vm, VirtualMachineTO spec) throws Ovm3ResourceException {
    final NicTO[] specNics = spec.getNics();
    // Guard clause: nothing to plug when the spec carries no nics.
    if (specNics == null) {
        LOGGER.info("No nics for vm " + spec.getName());
        return false;
    }
    return createVifs(vm, specNics);
}
/**
 * Copies name, memory, and cpu settings from the CloudStack VM spec onto the
 * OVM VM details, and derives the boot device and virtualization type from
 * the bootloader and guest OS.
 *
 * @throws CloudRuntimeException when the guest OS type is not supported by OVM
 */
protected void applySpecToVm(OvmVm.Details vm, VirtualMachineTO spec) {
    vm.name = spec.getName();
    vm.memory = spec.getMinRam();
    vm.cpuNum = spec.getCpus();
    // Deterministic UUID derived from the VM name, so re-applying the same
    // spec always yields the same UUID.
    vm.uuid = UUID.nameUUIDFromBytes(spec.getName().getBytes()).toString();

    if (spec.getBootloader() == BootloaderType.CD) {
        // Booting from CD requires full virtualization.
        vm.bootDev = OvmVm.CD;
        vm.type = OvmVm.HVM;
        return;
    }

    vm.bootDev = OvmVm.HDD;
    final String guestType = OvmHelper.getOvmGuestType(spec.getOs());
    if (OvmHelper.ORACLE_LINUX.equals(guestType)) {
        // Oracle Linux guests run paravirtualized.
        vm.type = OvmVm.PV;
    } else if (OvmHelper.WINDOWS.equals(guestType) || OvmHelper.ORACLE_SOLARIS.equals(guestType)) {
        vm.type = OvmVm.HVM;
    } else {
        throw new CloudRuntimeException(spec.getOs() + " is not supported" + ",Oracle VM only supports Oracle Linux and Windows");
    }
}
/**
 * Creates a mock VM for the given spec, or returns the existing one when a VM
 * with the same name is already registered.
 *
 * @param vmSpec spec whose name, min RAM, and cpu count seed the mock VM
 * @return the (possibly pre-existing) mock VM in Running state
 * @throws CloudRuntimeException when the host lacks free memory or a VNC port
 *         cannot be allocated
 */
@Override
public MockVm createVmFromSpec(VirtualMachineTO vmSpec) {
    String vmName = vmSpec.getName();
    long ramSize = vmSpec.getMinRam();
    // Simulated CPU utilization in [0, 100). The previous
    // "randSeed.nextInt() % 100" could return a negative percentage
    // (range -99..99) because nextInt() may be negative.
    int utilizationPercent = randSeed.nextInt(100);
    MockVm vm = null;
    synchronized (this) {
        vm = vms.get(vmName);
        if (vm == null) {
            if (ramSize > getHostFreeMemory()) {
                s_logger.debug("host is out of memory");
                throw new CloudRuntimeException("Host is out of Memory");
            }
            int vncPort = allocVncPort();
            if (vncPort < 0) {
                s_logger.debug("Unable to allocate VNC port");
                throw new CloudRuntimeException("Unable to allocate vnc port");
            }
            vm = new MockVm(vmName, State.Running, ramSize, vmSpec.getCpus(), utilizationPercent, vncPort);
            vms.put(vmName, vm);
        }
    }
    return vm;
}
// NOTE(review): fragment — the enclosing method and the closing brace of this
// if-block are outside this view.
if (s_logger.isDebugEnabled()) {
    // NOTE(review): "in gpu group" is concatenated without a trailing space,
    // so the group value runs into the label in the log output — confirm and
    // fix in the full source.
    s_logger.debug("Creating VGPU of VGPU type [ " + gpuDevice.getVgpuType() + " ] in gpu group" + gpuDevice.getGpuGroup() + " for VM " + cmd.getVirtualMachine().getName());
    // NOTE(review): "Created" is logged right after "Creating" with no visible
    // creation call between them — presumably the actual creation happens
    // elsewhere in the full method; verify against the file.
    s_logger.debug("Created VGPU of VGPU type [ " + gpuDevice.getVgpuType() + " ] for VM " + cmd.getVirtualMachine().getName());
// NOTE(review): fragment — the enclosing method and control-flow braces are
// outside this view. The same instance-name search is built and executed
// twice in a row; presumably this sits inside a wait/poll loop in the full
// source — confirm against the file.
q.and(q.entity().getInstanceName(), SearchCriteria.Op.EQ, vm.getName());
VMInstanceVO vmvo = q.find();
q.and(q.entity().getInstanceName(), SearchCriteria.Op.EQ, vm.getName());
vmvo = q.find();
if (vmvo == null) {
    // NOTE(review): "baremtal" is a typo for "baremetal" in this runtime
    // message; left untouched here since changing it alters behavior.
    return new StartAnswer(cmd, String.format("cannot find vm[name:%s] while waiting for baremtal provision done notification", vm.getName()));
// NOTE(review): as shown this return would be unreachable after the one
// above — the braces separating these branches are missing from this view.
return new StartAnswer(cmd, String.format("timeout after %s seconds, no baremetal provision done notification received. vm[name:%s] failed to start", isProvisionDoneNotificationTimeout, vm.getName()));
// NOTE(review): message is missing a space before "successfully".
s_logger.debug("Start bare metal vm " + vm.getName() + "successfully");
_vmName = vm.getName();
return new StartAnswer(cmd);
/**
 * Replugs a nic on a running router VM by sending a ReplugNicCommand to the
 * host the router is deployed on.
 *
 * @return true when the host reports success, false when it answers with a failure
 * @throws AgentUnavailableException when sending the command to the host times out
 * @throws ResourceUnavailableException when the router is not in the Running state
 */
@Override
public boolean replugNic(final Network network, final NicTO nic, final VirtualMachineTO vm, final ReservationContext context, final DeployDestination dest)
        throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
    boolean result = true;
    final VMInstanceVO router = _vmDao.findById(vm.getId());
    if (router.getState() == State.Running) {
        try {
            final ReplugNicCommand replugNicCmd = new ReplugNicCommand(nic, vm.getName(), vm.getType(), vm.getDetails());
            final Commands cmds = new Commands(Command.OnError.Stop);
            cmds.addCommand("replugnic", replugNicCmd);
            _agentMgr.send(dest.getHost().getId(), cmds);
            final ReplugNicAnswer replugNicAnswer = cmds.getAnswer(ReplugNicAnswer.class);
            if (replugNicAnswer == null || !replugNicAnswer.getResult()) {
                s_logger.warn("Unable to replug nic for vm " + vm.getName());
                result = false;
            }
        } catch (final OperationTimedoutException e) {
            // Fixed copy-paste from plugNic: this is a replug failure.
            throw new AgentUnavailableException("Unable to replug nic for router " + vm.getName() + " in network " + network, dest.getHost().getId(), e);
        }
    } else {
        s_logger.warn("Unable to apply ReplugNic, vm " + router + " is not in the right state " + router.getState());
        throw new ResourceUnavailableException("Unable to apply ReplugNic on the backend," + " vm " + vm + " is not in the right state", DataCenter.class, router.getDataCenterId());
    }
    return result;
}
// NOTE(review): fragment — the try block and the opening of the first catch
// are outside this view; only the tail of one catch and a full
// catch(Exception) appear here.
    s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e);
    return new MigrateWithStorageReceiveAnswer(command, e);
} catch (final Exception e) {
    // Broad catch at the command boundary: any failure is reported back to
    // the caller as a failed answer instead of propagating.
    s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e);
    return new MigrateWithStorageReceiveAnswer(command, e);
/**
 * Plugs a nic into a running router VM by sending a PlugNicCommand to the
 * host the router is deployed on.
 *
 * @return true when the host reports success, false when it answers with a failure
 * @throws AgentUnavailableException when sending the command to the host times out
 * @throws ResourceUnavailableException when the router is not in the Running state
 */
public boolean plugNic(final Network network, final NicTO nic, final VirtualMachineTO vm, final ReservationContext context, final DeployDestination dest)
        throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
    final VMInstanceVO router = _vmDao.findById(vm.getId());
    // Guard clause: only a running router can have a nic plugged.
    if (router.getState() != State.Running) {
        s_logger.warn("Unable to apply PlugNic, vm " + router + " is not in the right state " + router.getState());
        throw new ResourceUnavailableException("Unable to apply PlugNic on the backend," + " vm " + vm + " is not in the right state", DataCenter.class, router.getDataCenterId());
    }
    try {
        final PlugNicCommand plugCommand = new PlugNicCommand(nic, vm.getName(), vm.getType(), vm.getDetails());
        final Commands batch = new Commands(Command.OnError.Stop);
        batch.addCommand("plugnic", plugCommand);
        _agentMgr.send(dest.getHost().getId(), batch);
        final PlugNicAnswer answer = batch.getAnswer(PlugNicAnswer.class);
        if (answer != null && answer.getResult()) {
            return true;
        }
        s_logger.warn("Unable to plug nic for vm " + vm.getName());
        return false;
    } catch (final OperationTimedoutException e) {
        throw new AgentUnavailableException("Unable to plug nic for router " + vm.getName() + " in network " + network, dest.getHost().getId(), e);
    }
}
// NOTE(review): fragment — the body of this try block and the rest of the
// method run past this view; only the prologue is visible.
@Override
public Answer execute(final ScaleVmCommand command, final CitrixResourceBase citrixResourceBase) {
    final VirtualMachineTO vmSpec = command.getVirtualMachine();
    final String vmName = vmSpec.getName();
    try {
        final Connection conn = citrixResourceBase.getConnection();
// Cache the VM name from the spec; presumably used by the surrounding
// (not shown) logic — confirm against the full method.
final String vmName = vmSpec.getName();
// NOTE(review): fragment — the enclosing method and the body of this try
// block run past this view; only the prologue is visible.
final VirtualMachineTO vmSpec = command.getVirtualMachine();
final String name = vmSpec.getName();
try {
    final XsHost xsHost = xenServer610Resource.getHost();
/** * Generates the volume path by appending the Volume UUID to the Libvirt destiny images path.</br> * Example: /var/lib/libvirt/images/f3d49ecc-870c-475a-89fa-fd0124420a9b */ @Override protected String generateDestPath(VirtualMachineTO vmTO, VolumeVO srcVolume, Host destHost, StoragePoolVO destStoragePool, VolumeInfo destVolumeInfo) { DiskOfferingVO diskOffering = _diskOfferingDao.findById(srcVolume.getDiskOfferingId()); DiskProfile diskProfile = new DiskProfile(destVolumeInfo, diskOffering, HypervisorType.KVM); String templateUuid = getTemplateUuid(destVolumeInfo.getTemplateId()); CreateCommand rootImageProvisioningCommand = new CreateCommand(diskProfile, templateUuid, destStoragePool, true); Answer rootImageProvisioningAnswer = _agentMgr.easySend(destHost.getId(), rootImageProvisioningCommand); if (rootImageProvisioningAnswer == null) { throw new CloudRuntimeException(String.format("Migration with storage of vm [%s] failed while provisioning root image", vmTO.getName())); } if (!rootImageProvisioningAnswer.getResult()) { throw new CloudRuntimeException(String.format("Unable to modify target volume on the host [host id:%s, name:%s]", destHost.getId(), destHost.getName())); } String libvirtDestImgsPath = null; if (rootImageProvisioningAnswer instanceof CreateAnswer) { libvirtDestImgsPath = ((CreateAnswer)rootImageProvisioningAnswer).getVolume().getName(); } // File.getAbsolutePath is used to keep the file separator as it should be and eliminate a verification to check if exists a file separator in the last character of libvirtDestImgsPath. return new File(libvirtDestImgsPath, destVolumeInfo.getUuid()).getAbsolutePath(); }
/**
 * Connects every physical disk referenced by the VM spec to this host.
 * ISO disks, and unmanaged-store disks of a VM that is migrating, are
 * treated as already satisfied and skipped.
 *
 * @return true when every applicable disk connected successfully; false on
 *         the first connect failure (logged) or when the spec has no disks
 */
public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) {
    final String vmName = vmSpec.getName();
    boolean connected = false;
    for (final DiskTO disk : vmSpec.getDisks()) {
        // ISOs need no physical-disk connection.
        if (disk.getType() == Volume.Type.ISO) {
            connected = true;
            continue;
        }
        final VolumeObjectTO vol = (VolumeObjectTO)disk.getData();
        final PrimaryDataStoreTO store = (PrimaryDataStoreTO)vol.getDataStore();
        // During migration, disks on unmanaged storage are already visible to
        // the host and need no explicit connect.
        if (!store.isManaged() && VirtualMachine.State.Migrating.equals(vmSpec.getState())) {
            connected = true;
            continue;
        }
        final KVMStoragePool pool = getStoragePool(store.getPoolType(), store.getUuid());
        final StorageAdaptor adaptor = getStorageAdaptor(pool.getType());
        connected = adaptor.connectPhysicalDisk(vol.getPath(), pool, disk.getDetails());
        if (!connected) {
            // Fail fast on the first disk that cannot be connected.
            s_logger.error("Failed to connect disks via vm spec for vm: " + vmName + " volume:" + vol.toString());
            return connected;
        }
    }
    return connected;
}
// NOTE(review): fragment — the enclosing method, the creation of `cmds`, and
// the handling of the answer are outside this view.
// Queue an unplug-nic command and send it to the destination host.
final UnPlugNicCommand unplugNicCmd = new UnPlugNicCommand(nic, vm.getName());
cmds.addCommand("unplugnic", unplugNicCmd);
_agentMgr.send(dest.getHost().getId(), cmds);
// NOTE(review): fragment — the enclosing method and the body/closing brace of
// this for-loop run past this view; `conn` is obtained but its use is not
// visible here.
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vm.getName());
for (final NicTO nic : nics) {
    // Plug each nic via the vif driver matching its traffic type/name.
    libvirtComputingResource.getVifDriver(nic.getType(), nic.getName()).plug(nic, null, "", null);