// Convenience overload: wraps bucket/key/file in a PutObjectRequest and delegates to upload(PutObjectRequest).
return upload(new PutObjectRequest(bucketName, key, file));
// Convenience overload: wraps bucket/key/stream/metadata in a PutObjectRequest and delegates to upload(PutObjectRequest).
return upload(new PutObjectRequest(bucketName, key, input, objectMetadata));
// Blocks until the upload finishes, then removes the local temporary file.
// NOTE(review): waitForUploadResult() can throw (interrupted / client error); on that path the
// temp file is never deleted — confirm a surrounding finally/cleanup handles it. Snippet is truncated.
mManager.upload(putReq).waitForUploadResult(); if (!mFile.delete()) { LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
public static void uploadFile(String file_path, String bucket_name, String key_prefix, boolean pause) { System.out.println("file: " + file_path + (pause ? " (pause)" : "")); String key_name = null; if (key_prefix != null) { key_name = key_prefix + '/' + file_path; } else { key_name = file_path; } File f = new File(file_path); TransferManager xfer_mgr = TransferManagerBuilder.standard().build(); try { Upload xfer = xfer_mgr.upload(bucket_name, key_name, f); // loop with Transfer.isDone() XferMgrProgress.showTransferProgress(xfer); // or block with Transfer.waitForCompletion() XferMgrProgress.waitForCompletion(xfer); } catch (AmazonServiceException e) { System.err.println(e.getErrorMessage()); System.exit(1); } xfer_mgr.shutdownNow(); }
// Creates a TransferManager and starts an asynchronous upload of the file.
// NOTE(review): snippet is truncated — the try block's completion handling and the
// TransferManager shutdown are not visible here.
TransferManager xfer_mgr = TransferManagerBuilder.standard().build(); try { Upload u = xfer_mgr.upload(bucket_name, key_name, f);
// Starts an asynchronous upload for the prepared request; the returned Upload tracks progress/completion.
Upload upload = transferManager.upload(request);
// Starts the asynchronous upload and wraps the SDK handle in the project's S3UploadHandle.
Upload upload = mManager.upload(uploadRequest); return new S3UploadHandle(upload);
/**
 * Starts an asynchronous upload of {@code sourceFile} to {@code bucketName} under {@code key}.
 * Does not block; callers must wait on the returned {@link Upload} if completion matters.
 *
 * @return the in-flight {@link Upload} handle
 */
public static Upload putFile(final ClientOptions clientOptions, final File sourceFile, final String bucketName, final String key) {
    LOGGER.debug(format("Sending file %1$s as S3 object %2$s in bucket %3$s", sourceFile.getName(), key, bucketName));
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.upload(bucketName, key, sourceFile);
}
/**
 * Starts an asynchronous upload of {@code sourceStream} to {@code bucketName} under {@code key}.
 * Does not block; callers must wait on the returned {@link Upload} if completion matters.
 *
 * NOTE(review): no ObjectMetadata (hence no Content-Length) is supplied — the SDK may
 * buffer the whole stream in memory; confirm stream sizes stay small for this call path.
 *
 * @return the in-flight {@link Upload} handle
 */
public static Upload putObject(final ClientOptions clientOptions, final InputStream sourceStream, final String bucketName, final String key) {
    LOGGER.debug(format("Sending stream as S3 object %1$s in bucket %2$s", key, bucketName));
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.upload(bucketName, key, sourceStream, null);
}
// Starts an asynchronous upload of the local JSON file under the "/game/<keyName>.json" key.
// NOTE(review): the leading '/' becomes part of the object key — confirm that is intended.
Upload upload = tm.upload(existingBucketName, "/game/" + keyName + ".json", new File(path));
/**
 * Starts an asynchronous upload described by the given {@link PutObjectRequest}.
 * Does not block; callers must wait on the returned {@link Upload} if completion matters.
 *
 * @return the in-flight {@link Upload} handle
 */
public static Upload putObject(final ClientOptions clientOptions, final PutObjectRequest req) {
    LOGGER.debug(format("Sending stream as S3 object %1$s in bucket %2$s using PutObjectRequest", req.getKey(), req.getBucketName()));
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.upload(req);
}
/** Delegates the put request straight to the SDK's TransferManager. */
@Override
public Upload upload(PutObjectRequest putObjectRequest, TransferManager transferManager) {
    final Upload started = transferManager.upload(putObjectRequest);
    return started;
}
/** Delegates the put request straight to the SDK's TransferManager. */
@Override
public Upload upload(PutObjectRequest putObjectRequest, TransferManager transferManager) {
    final Upload started = transferManager.upload(putObjectRequest);
    return started;
}
// Builds static credentials and starts an asynchronous upload of the picture file.
// NOTE(review): the TransferManager(AWSCredentials) constructor is deprecated in the v1 SDK —
// prefer TransferManagerBuilder; also confirm the manager is shut down somewhere (not visible here).
AWSCredentials credentials = new BasicAWSCredentials(MY_ACCESS_KEY_ID,MY_SECRET_KEY); TransferManager manager = new TransferManager(credentials); Upload upload = manager.upload(pictureBucket,"images/"+fileName,new File(fileURI));
// Uploads a file from external storage and busy-waits on isDone() while logging progress.
// NOTE(review): the while-loop spins without sleeping (burns CPU), and Toast/this suggest this
// runs in an Activity — blocking and showing Toasts from a background thread, or doing network
// work on the UI thread, are both problematic on Android; confirm which thread executes this.
TransferManager transferManager = new TransferManager(provider); String bucket = "uni-cloud"; File file = new File("//sdcard//Download//cw.pdf"); if(file.exists()) { Log.e(TAG,"File found " + file.getName()); } else { Log.e(TAG,"File not found"); } Upload upload = transferManager.upload(bucket, file.getName(), file); while (!upload.isDone()){ //Show a progress bar... TransferProgress transferred = upload.getProgress(); Toast.makeText(this, "Uploading... ", Toast.LENGTH_LONG).show(); Log.i("Percentage", "" +transferred.getPercentTransferred()); } Toast.makeText(this, "Uploaded", Toast.LENGTH_LONG).show();
/** * Use this method to reliably upload large files and wait until they are fully uploaded before continuing. Behind the scenes this is accomplished by splitting the file up into * manageable chunks and using separate threads to upload each chunk. Consider using multi-part uploads on files larger than <code>MULTI_PART_UPLOAD_THRESHOLD</code>. When this * method returns, all threads have finished and the file has been reassembled on S3. The benefit to this method is that if any one thread fails, only the portion of the file * that particular thread was handling will have to be re-uploaded (instead of the entire file). A reasonable number of automatic retries occurs if an individual upload thread * fails. If the file upload fails this method throws <code>AmazonS3Exception</code> */ public void blockingMultiPartUpload(PutObjectRequest request, TransferManager manager) { // Use multi-part upload for large files Upload upload = manager.upload(request); try { // Block and wait for the upload to finish upload.waitForCompletion(); } catch (Exception e) { throw new AmazonS3Exception("Unexpected error uploading file", e); } }
// Builds metadata from the stream and blocks until the upload completes.
// NOTE(review): InputStream.available() is only the count of bytes readable without blocking,
// not the total stream length — using it as Content-Length is wrong for file/network streams
// unless the whole payload is already in memory (e.g. ByteArrayInputStream). Verify the source.
// Also, setContentEncoding("UTF-8") sets the HTTP Content-Encoding header, which normally names
// a compression codec (gzip/deflate), not a charset — confirm this is intended.
ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentEncoding("UTF-8"); size = inputStream.available(); metadata.setContentLength(size); TransferManager transferManager = new TransferManager(credentialsProvider); Upload upload = transferManager.upload(bucket_name, key, images3, metadata); upload.waitForCompletion();
/**
 * Uploads {@code file} to the configured deployment bucket under {@code key}, blocking until
 * the transfer completes and logging the size and duration.
 *
 * @param key destination S3 object key
 */
private void transferFileToS3(final String key) {
    final long fileSizeMb = file.length() / (1024 * 1024);
    getLogger().lifecycle("Uploading {} MB from file {} to {}...", fileSizeMb, file, getS3Url());
    final TransferManager transferManager = createTransferManager();
    final Instant start = Instant.now();
    final Upload upload = transferManager.upload(config.getDeploymentBucket(), key, file);
    try {
        upload.waitForCompletion();
        final Duration uploadDuration = Duration.between(start, Instant.now());
        getLogger().lifecycle("Uploaded {} to {} in {}", file, getS3Url(), uploadDuration);
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AssertionError("Upload interrupted", e);
    } finally {
        // Release the TransferManager's worker threads — previously leaked on every call.
        // Pass false so a possibly-shared underlying S3 client is NOT closed here.
        transferManager.shutdownNow(false);
    }
}
/**
 * Verifies that the mocked TransferManager received exactly one upload(bucket, key, file)
 * call per expected request, with buckets and keys matching in order.
 */
private void verifyTMUpload(TransferManager mock, ExpectedRequestParams[] expect) {
    final ArgumentCaptor<String> bucketCaptor = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    verify(mock, times(expect.length)).upload(bucketCaptor.capture(), keyCaptor.capture(), any(File.class));
    final List<String> capturedBuckets = bucketCaptor.getAllValues();
    final List<String> capturedKeys = keyCaptor.getAllValues();
    // Captured arguments arrive in call order; compare them positionally.
    for (int idx = 0; idx < expect.length; idx++) {
        assertEquals(expect[idx].bucket, capturedBuckets.get(idx));
        assertEquals(expect[idx].key, capturedKeys.get(idx));
    }
}
@Override public void saveData(String directory, String fileName, PersistenceBuffer persistenceBuffer) { try { ObjectMetadata metadata = new ObjectMetadata(); // Set content encoding to gzip. This way browsers will decompress on download using native deflate code. // http://www.rightbrainnetworks.com/blog/serving-compressed-gzipped-static-files-from-amazon-s3-or-cloudfront/ metadata.setContentEncoding("gzip"); metadata.setContentType(persistenceBuffer.getMimeType()); // We must setContentLength or the S3 client will re-buffer the InputStream into another memory buffer. metadata.setContentLength(persistenceBuffer.getSize()); // amazonS3.putObject(directory, fileName, persistenceBuffer.getInputStream(), metadata); final Upload upload = transferManager.upload(directory, fileName, persistenceBuffer.getInputStream(), metadata); upload.addProgressListener(new UploadProgressLogger(upload)); // Block until upload completes to avoid accumulating unlimited uploads in memory. upload.waitForCompletion(); } catch (Exception e) { throw new RuntimeException(e); } }