Swift Merge AVAsset-Videos array

I want to merge the AVAsset-arrayVideos into one single video and save it to the camera roll. Raywenderlich.com has a great tutorial where two videos are merged into one. I created the following code, but the video I get after exporting to the camera roll contains only the first and the last video from the array (excluding the rest of the videos in the middle of arrayVideos). Am I missing something here?

var arrayVideos = [AVAsset]() //Videos Array    
var atTimeM: CMTime = CMTimeMake(0, 0)
var lastAsset: AVAsset!
var layerInstructionsArray = [AVVideoCompositionLayerInstruction]()
var completeTrackDuration: CMTime = CMTimeMake(0, 1)
var videoSize: CGSize = CGSize(width: 0.0, height: 0.0)

func mergeVideoArray(){

    let mixComposition = AVMutableComposition()
    for videoAsset in arrayVideos{
        let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        do {
            if videoAsset == arrayVideos.first{
                atTimeM = kCMTimeZero
            } else{
                atTimeM = lastAsset!.duration
            }
            try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0], at: atTimeM)  
            videoSize = videoTrack.naturalSize
        } catch let error as NSError {
            print("error: \(error)")
        }
        completeTrackDuration = CMTimeAdd(completeTrackDuration, videoAsset.duration)
        let videoInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
        if videoAsset != arrayVideos.last{
            videoInstruction.setOpacity(0.0, at: videoAsset.duration)
        }
        layerInstructionsArray.append(videoInstruction)
        lastAsset = videoAsset            
    }

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, completeTrackDuration)
    mainInstruction.layerInstructions = layerInstructionsArray        

    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(1, 30)
    mainComposition.renderSize = CGSize(width: videoSize.width, height: videoSize.height)

    let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
    let dateFormatter = DateFormatter()
    dateFormatter.dateStyle = .long
    dateFormatter.timeStyle = .short
    let date = dateFormatter.string(from: NSDate() as Date)
    let savePath = (documentDirectory as NSString).appendingPathComponent("mergeVideo-\(date).mov")
    let url = NSURL(fileURLWithPath: savePath)

    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter!.outputURL = url as URL
    exporter!.outputFileType = AVFileTypeQuickTimeMovie
    exporter!.shouldOptimizeForNetworkUse = true
    exporter!.videoComposition = mainComposition
    exporter!.exportAsynchronously {

        PHPhotoLibrary.shared().performChanges({
            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exporter!.outputURL!)
        }) { saved, error in
            if saved {
                let alertController = UIAlertController(title: "Your video was successfully saved", message: nil, preferredStyle: .alert)
                let defaultAction = UIAlertAction(title: "OK", style: .default, handler: nil)
                alertController.addAction(defaultAction)
                self.present(alertController, animated: true, completion: nil)
            } else{
                print("video erro: \(error)")

            }
        }
    }
} 

You need to keep track of the total time of all the assets seen so far and update it for each video.

The code in your question overwrites atTimeM with just the current video. That is why only the first and the last videos are included: every clip after the first is inserted at the previous clip's duration rather than at the accumulated duration, so the middle clips end up overlapping instead of following one another.

It will look something like this:

...
var totalTime : CMTime = CMTimeMake(0, 1)

func mergeVideoArray() {

    let mixComposition = AVMutableComposition()
    for videoAsset in arrayVideos {
        let videoTrack = 
            mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, 
                                           preferredTrackID: Int32(kCMPersistentTrackID_Invalid))          
        do {
            if videoAsset == arrayVideos.first {
                atTimeM = kCMTimeZero
            } else {
                atTimeM = totalTime // <-- Use the total time for all the videos seen so far.
            }
            try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), 
                                           of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0], 
                                           at: atTimeM)  
            videoSize = videoTrack.naturalSize
        } catch let error as NSError {
            print("error: \(error)")
        }
        totalTime = CMTimeAdd(totalTime, videoAsset.duration) // <-- Update the total time for all videos.
...

You can remove the use of lastAsset.

You don't need atTimeM at all, since you are simply marching completeTrackDuration along and it is exactly where the next piece should be added. So replace

if videoAsset == arrayVideos.first{
            atTimeM = kCMTimeZero
        } else{
            atTimeM = lastAsset!.duration
        }
        try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0], at: atTimeM)

with

try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0], at: completeTrackDuration)
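Putting both suggestions together, the loop body no longer needs atTimeM or lastAsset at all. Below is a minimal sketch of the simplified loop inside mergeVideoArray(), reusing the question's properties and the same Swift 3-era AVFoundation names; note that the opacity switch then also has to happen at the accumulated time, because layer-instruction times are expressed in composition time, not in each clip's own timeline:

for videoAsset in arrayVideos {
    let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo,
                                                    preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    do {
        // Always insert at the running end of the composition.
        try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
                                       of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0],
                                       at: completeTrackDuration)
        videoSize = videoTrack.naturalSize
    } catch let error as NSError {
        print("error: \(error)")
    }
    completeTrackDuration = CMTimeAdd(completeTrackDuration, videoAsset.duration)

    let videoInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    if videoAsset != arrayVideos.last {
        // Hide this clip once the composition has moved past its end.
        videoInstruction.setOpacity(0.0, at: completeTrackDuration)
    }
    layerInstructionsArray.append(videoInstruction)
}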

Swift 4

Use it like this:

MeargeVide.mergeVideoArray(arrayVideos: arrayAsset) { (urlMeargeVide, error) in
    debugPrint("url", urlMeargeVide ?? "")
    debugPrint("error", error ?? "")
}
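
Since the goal in the question is to end up in the camera roll, the URL handed back by the callback can then be saved with the same PHPhotoLibrary call the question already uses. A minimal sketch (requires import Photos and assumes photo-library permission has already been granted):

MeargeVide.mergeVideoArray(arrayVideos: arrayAsset) { (urlMeargeVide, error) in
    guard let outputURL = urlMeargeVide, error == nil else {
        debugPrint("merge error", error ?? "")
        return
    }
    // Same save-to-camera-roll call as in the question.
    PHPhotoLibrary.shared().performChanges({
        PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL)
    }) { saved, saveError in
        debugPrint("saved to camera roll:", saved, saveError ?? "")
    }
}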

Complete class that handles orientation and merges multiple clips into one:

class MeargeVide {

   static func orientationFromTransform(_ transform: CGAffineTransform)
        -> (orientation: UIImageOrientation, isPortrait: Bool) {
            var assetOrientation = UIImageOrientation.up
            var isPortrait = false
            if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
                assetOrientation = .right
                isPortrait = true
            } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
                assetOrientation = .left
                isPortrait = true
            } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
                assetOrientation = .up
            } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
                assetOrientation = .down
            }
            return (assetOrientation, isPortrait)
    }

    static  func videoCompositionInstruction(_ track: AVCompositionTrack, asset: AVAsset)
        -> AVMutableVideoCompositionLayerInstruction {
            let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
            let assetTrack = asset.tracks(withMediaType: .video)[0]

            let transform = assetTrack.preferredTransform
            let assetInfo = orientationFromTransform(transform)

            var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
            if assetInfo.isPortrait {
                scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
                let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
                instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: kCMTimeZero)
            } else {
                let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
                var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
                    .concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
                if assetInfo.orientation == .down {
                    let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
                    let windowBounds = UIScreen.main.bounds
                    let yFix = assetTrack.naturalSize.height + windowBounds.height
                    let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
                    concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
                }
                instruction.setTransform(concat, at: kCMTimeZero)
            }

            return instruction
    }

    class func mergeVideoArray(arrayVideos:[AVAsset], callBack:@escaping (_ urlGet:URL?,_ errorGet:Error?) -> Void){

        var layerInstructionsArray = [AVVideoCompositionLayerInstruction]()
        var completeTrackDuration: CMTime = CMTimeMake(0, 1)
        var videoSize: CGSize = CGSize(width: 0.0, height: 0.0)
        var totalTime: CMTime = CMTimeMake(0, 1)

        let mixComposition = AVMutableComposition.init()
        for videoAsset in arrayVideos{

            let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
            do {
                // Insert each clip at the running end of the composition.
                try videoTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
                                                of: videoAsset.tracks(withMediaType: AVMediaType.video)[0],
                                                at: completeTrackDuration)
                videoSize = (videoTrack?.naturalSize)!
            } catch let error as NSError {
                print("error: \(error)")
            }

            totalTime = CMTimeAdd(totalTime, videoAsset.duration) // <-- Running end time of all videos so far.
            completeTrackDuration = CMTimeAdd(completeTrackDuration, videoAsset.duration)

            let layerInstruction = self.videoCompositionInstruction(videoTrack!, asset: videoAsset)
            if videoAsset != arrayVideos.last {
                // Hide this clip at its end in composition time, not at its own duration.
                layerInstruction.setOpacity(0.0, at: totalTime)
            }

            layerInstructionsArray.append(layerInstruction)
        }


        let mainInstruction = AVMutableVideoCompositionInstruction()
        mainInstruction.layerInstructions = layerInstructionsArray
        mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, completeTrackDuration)

        let mainComposition = AVMutableVideoComposition()
        mainComposition.instructions = [mainInstruction]
        mainComposition.frameDuration = CMTimeMake(1, 30)
        mainComposition.renderSize = CGSize(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)

        let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
        let dateFormatter = DateFormatter()
        dateFormatter.dateStyle = .long
        dateFormatter.timeStyle = .short
        let date = dateFormatter.string(from: NSDate() as Date)
        let savePath = (documentDirectory as NSString).appendingPathComponent("mergeVideo-\(date).mp4")
        let url = NSURL(fileURLWithPath: savePath)

        let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
        exporter!.outputURL = url as URL
        exporter!.outputFileType = AVFileType.mp4
        exporter!.shouldOptimizeForNetworkUse = true
        exporter!.videoComposition = mainComposition
        exporter!.exportAsynchronously {
            DispatchQueue.main.async {
                callBack(exporter?.outputURL, exporter?.error)
            }

        }
    } 
}