Giter Club home page Giter Club logo

random_geospatial's Introduction

Just for fun 🧑‍🔬 — Screen Shot 2023-05-21 at 1 12 42 PM. Not my model, and not a particularly good model; just weights I found and ran through my pipeline for geospatial broad-area inference.

random_geospatial's People

Contributors

benhuff avatar

Watchers

 avatar

random_geospatial's Issues

yolo2via

Just some classes to handle things...

import os
import logging
from PIL import Image
import json
from easydict import EasyDict as edict
from tqdm import tqdm

class YoloDuo:
    """Pair one image with its YOLO-format label file and build the
    VIA (VGG Image Annotator) metadata entry for that image.

    Attributes:
        img_fn: Base filename of the image.
        img_bytes: Image file size in bytes.
        img_w, img_h: Image dimensions in pixels.
        regions: List of VIA region dicts (rectangles) parsed from the labels.
        key: VIA image key, ``filename + str(size_in_bytes)``.
        img_metadata: The complete VIA ``_via_img_metadata`` entry.
    """

    def __init__(self, img_path, lbl_path):
        # os.path.basename handles both '/' and the OS-specific separator;
        # the previous split('/')[-1] broke on Windows-style paths.
        self.img_fn = os.path.basename(img_path)
        self.img_bytes = os.stat(img_path).st_size
        self.img_w, self.img_h = self.get_image_size(img_path)
        self.regions = self.get_regions(lbl_path)
        # VIA keys each image by filename concatenated with its byte size.
        self.key = self.img_fn + str(self.img_bytes)

        self.img_metadata = self._generate_metadata()

    def get_regions(self, label_path):
        """Parse a YOLO label file into a list of VIA rectangle regions.

        Each line is ``class x_center y_center width height`` with
        coordinates normalized to [0, 1]; they are converted to absolute
        pixel x/y/width/height using this image's dimensions.
        """
        regions = []
        with open(label_path, "r") as lbl_file:
            for lbl in lbl_file:
                region = {}

                label_parts = lbl.strip().split(" ")
                # Kept as a string: the VIA class_dict uses string keys ('0', ...).
                category_id = label_parts[0]

                x_center, y_center, bbox_width, bbox_height = map(float, label_parts[1:])
                # Top-left corner in pixels (center minus half the box size).
                x_min = int((x_center - bbox_width / 2) * self.img_w)
                y_min = int((y_center - bbox_height / 2) * self.img_h)

                width = int(bbox_width * self.img_w)
                height = int(bbox_height * self.img_h)

                region['shape_attributes'] = {
                    'name': 'rect',
                    'x': x_min,
                    'y': y_min,
                    'width': width,
                    'height': height
                }

                region['region_attributes'] = {
                    'object': category_id
                }

                regions.append(region)
        return regions

    def _generate_metadata(self):
        """Assemble the VIA per-image metadata dict for this image."""
        value = dict()
        value['filename'] = self.img_fn
        value['size'] = self.img_bytes
        value['regions'] = self.regions
        value['file_attributes'] = {}
        return value

    @staticmethod
    def get_image_size(image_path):
        """Return (width, height) of the image at *image_path*."""
        with Image.open(image_path) as img:
            return img.size


class ViaProject:
    """Build a VIA (VGG Image Annotator) 2.x project JSON from a folder of
    images and a folder of YOLO-format ``.txt`` label files.

    The project is written to *output_file* on construction. Image files are
    expected to be ``.jpeg`` siblings of the label files.
    """

    def __init__(self, project_name, images_folder, labels_folder, output_file,
                 class_dict=None):
        # Avoid a mutable default argument; fall back to a one-class map
        # matching the original default.
        if class_dict is None:
            class_dict = {'0': 'single_cls'}
        self.version = '2.0.10'
        self.settings = {
            "ui": {
                "annotation_editor_height": 25,
                "annotation_editor_fontsize": 0.8,
                "leftsidebar_width": 18,
                "image_grid": {
                    "img_height": 80,
                    "rshape_fill": "none",
                    "rshape_fill_opacity": 0.3,
                    "rshape_stroke": "yellow",
                    "rshape_stroke_width": 2,
                    "show_region_shape": True,
                    "show_image_policy": "all"
                },
                "image": {
                    "region_label": "__via_region_id__",
                    "region_color": "__via_default_region_color__",
                    "region_label_font": "10px Sans",
                    "on_image_annotation_editor_placement": "NEAR_REGION"
                }
            },
            "core": {
                "buffer_size": 18,
                "filepath": {},
                "default_filepath": images_folder
            },
            "project": {
                "name": project_name
            }
        }
        self.attributes = {
            "region": {
                "object": {
                    "type": "radio",
                    "description": "",
                    "options": class_dict,
                    "default_options": {}
                }
            },
            "file": {}
        }
        self.id_list, self.metadata = self.generate_dataset_metadata(images_folder, labels_folder)
        self.via_project = self.generate_via_json(output_file)

    def generate_dataset_metadata(self, images_folder, labels_folder):
        """Build VIA metadata for every label file that has a matching image.

        Returns:
            (image_id_list, img_metadata_dict): the VIA image-id order list
            and the per-image metadata keyed by ``filename + size``.
        """
        image_id_list = []
        img_metadata_dict = {}

        filenames = [x for x in os.listdir(labels_folder) if x.endswith('.txt')]
        pbar = tqdm(filenames, colour='#0091ea')
        for fn in pbar:
            pbar.set_description(f'Generating metadata from {fn}..')
            img_path = os.path.join(images_folder, fn.replace('.txt','.jpeg'))
            lbl_path = os.path.join(labels_folder, fn)
            # The original condition was `not exists(img) and exists(lbl)`,
            # which only worked because the label always exists (it came from
            # listdir); require BOTH files explicitly.
            if not (os.path.exists(img_path) and os.path.exists(lbl_path)):
                logging.warning(f"Could not find both image and label file for {fn.replace('.txt', '')}")
            else:
                data = YoloDuo(img_path, lbl_path)
                img_metadata_dict[data.key] = data.img_metadata
                image_id_list.append(data.key)
        return image_id_list, img_metadata_dict

    def generate_via_json(self, output_file):
        """Assemble the full VIA project dict and write it to *output_file*."""
        via_dict = edict()
        via_dict._via_data_format_version = self.version
        via_dict._via_settings = self.settings
        via_dict._via_attributes = self.attributes
        via_dict._via_image_id_list = self.id_list
        via_dict._via_img_metadata = self.metadata
        with open(output_file, 'w') as f:
            json.dump(via_dict, f)
        return via_dict

export a collection

// Monthly VIIRS nighttime-lights composites for Jan 2020 – Jun 2022.
var nightlights = ee.ImageCollection('NOAA/VIIRS/DNB/MONTHLY_V1/VCMSLCFG')
                  .filter(ee.Filter.date('2020-01-01', '2022-06-30'));
                  
// Grab the native CRS/transform from the first image so exports keep the
// source pixel grid instead of being resampled.
var img = nightlights.first();
var proj = img.projection().getInfo();
var crs = proj.crs;
var transform = proj.transform;
print('VIIRS Nightlights',nightlights);
print('CRS',crs);
print('Transform',transform);

// NOTE(review): `box` is an ROI geometry assumed to be defined earlier in
// the script — confirm it exists before running.
function clip(image){
  return image.clip(box);
}

var nightlights = nightlights.select('avg_rad').map(clip);

// Convert to a list so individual images can be pulled out by index below.
var nightlist = ee.ImageCollection(nightlights).toList(nightlights.size());
print('Clipped VIIRS Nightlights: Converted to List',nightlist);

// getInfo() runs synchronously on the client; one Drive export task is
// queued per monthly composite.
var size = nightlights.size().getInfo();
for(var i=0;i<size;i++){
  var img = ee.Image(nightlist.get(i));
  var imgname = img.id().getInfo();
  Export.image.toDrive({
    image:img,
    folder:'Venezuela_Nightlights',
    description:'VIIRS_'+imgname,
    crs:crs,
    crsTransform:transform,
    region:box,
  });
}

yolo plotting utility

import os
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches

def plot_yolo_bounding_boxes(path_to_png, path_to_txt, category_map=None, ax=None):
    """Display an image with its YOLO-format bounding boxes drawn on top.

    Parameters
    ----------
    path_to_png : str
        Path to the image file (any format PIL can open).
    path_to_txt : str
        Path to the YOLO label file: one ``class xc yc w h`` line per box,
        coordinates normalized to [0, 1].
    category_map : dict, optional
        Maps integer category ids to display names. Ids missing from the
        map fall back to ``"Class <id>"`` instead of raising KeyError.
    ax : matplotlib Axes, optional
        Axes to draw into (e.g. a subplot). When omitted, the current axes
        is used and ``plt.show()`` is called, matching the original behavior.
    """
    show = ax is None
    if ax is None:
        ax = plt.gca()

    # Colors keyed by category ID; unknown IDs default to red below.
    category_colors = {1: 'red', 2: 'green', 3: 'blue', 4: 'yellow'}  # Update with your own category IDs and colors

    # Context manager guarantees the image file handle is closed.
    with Image.open(path_to_png) as image:
        image_width, image_height = image.size

        # Load the bounding box coordinates from the .txt file
        with open(path_to_txt, "r") as txt_file:
            for line in txt_file:
                label_parts = line.strip().split(" ")
                category_id = int(label_parts[0])
                x_center, y_center, bbox_width, bbox_height = map(float, label_parts[1:])
                # Convert normalized center/size to pixel corners, clamped
                # to the image bounds.
                x_min = max(0, int((x_center - bbox_width / 2) * image_width))
                y_min = max(0, int((y_center - bbox_height / 2) * image_height))
                x_max = min(image_width, int((x_center + bbox_width / 2) * image_width))
                y_max = min(image_height, int((y_center + bbox_height / 2) * image_height))

                # Get color for current category ID
                bbox_color = category_colors.get(category_id, 'red')  # Default to red if category ID not found

                # Plot the bounding box on the image with category-specific color
                rect = patches.Rectangle((x_min, y_min), (x_max - x_min), (y_max - y_min),
                                         fill=None, edgecolor=bbox_color, linewidth=1)
                ax.add_patch(rect)
                if category_map is not None:
                    # .get avoids a KeyError when a label id is missing from
                    # the supplied map.
                    category_name = category_map.get(category_id, f"Class {category_id}")
                else:
                    category_name = f"Class {category_id}"
                ax.text(x_min, y_min, category_name, color=bbox_color, fontsize=6, weight='bold')

        # imshow while the image is still open (PIL loads lazily).
        ax.imshow(image)

    ax.axis('off')
    if show:
        plt.show()

# Example usage:
path_to_png = "image1.jpeg"     # Path to the PNG image
path_to_txt = "image1.txt"     # Path to the YOLO formatted .txt file
plot_yolo_bounding_boxes(path_to_png, path_to_txt)

# Or
# Example usage:
fig, axs = plt.subplots(4, 4, figsize=(15, 15))  # Create a 4x4 grid of subplots
axs = axs.ravel()  # Flatten the subplots array

for i in range(16):
    path_to_png = f"example_{i+1}.png"     # Path to the PNG image
    path_to_txt = f"example_{i+1}.txt"     # Path to the YOLO formatted .txt file
    axs[i].set_title(f"Example {i+1}")
    # NOTE(review): plot_yolo_bounding_boxes draws on plt.gca() and calls
    # plt.show() itself, so each call renders its own figure rather than
    # filling axs[i] — confirm whether the helper should take an axis arg.
    plot_yolo_bounding_boxes(path_to_png, path_to_txt)

plt.tight_layout()
plt.show()

OLS

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm

# Ordinary least squares: regress the zonal '_mean' values on VIIRS 'avg_rad'.
# NOTE(review): assumes `merge` is a DataFrame defined earlier with
# 'avg_rad' and '_mean' columns — confirm against the notebook context.
X = sm.add_constant(merge['avg_rad'])
model = sm.OLS(merge['_mean'],X)
results = model.fit()
plt.scatter(merge['avg_rad'],merge['_mean'],alpha=0.3, marker='o')
# params[0] is the intercept added by add_constant, params[1] the slope.
# NOTE(review): positional indexing into results.params is deprecated in
# newer pandas/statsmodels; consider results.predict(X).
y_predict = results.params[0] + results.params[1]*merge['avg_rad']
plt.plot(merge['avg_rad'],y_predict, linewidth=3)
plt.xlabel('avg_rad')
plt.ylabel('_mean')
plt.title('OLS Regression')

print(results.summary())

stuff

import os
from PIL import Image
import rasterio
from rasterio.windows import Window
import cv2

def split_image(image_path, square_size, overlap):
    """Tile a raster into fixed-size squares with the given overlap.

    For every full ``square_size`` x ``square_size`` window (partial tiles at
    the right/bottom edges are skipped), applies CLAHE contrast equalization
    to band 1 and writes the result both as a georeferenced GeoTIFF
    (``square_<row>_<col>.tif``) and as a plain PNG, in the working directory.

    Parameters
    ----------
    image_path : str
        Path to the source raster (any format rasterio can open).
    square_size : int
        Tile edge length in pixels.
    overlap : int
        Pixel overlap between neighboring tiles; the step between tile
        origins is ``square_size - overlap``.
    """
    # CLAHE object is loop-invariant; build it once.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))

    with rasterio.open(image_path) as src:
        height, width = src.height, src.width
        step = square_size - overlap
        for row in range(0, height, step):
            for col in range(0, width, step):
                # Guard clause replaces the original empty `pass` branch:
                # skip partial tiles at the right/bottom edges.
                if row + square_size > height or col + square_size > width:
                    continue

                window = Window(col, row, square_size, square_size)
                data = src.read(1, window=window)

                # CLAHE operates on 8-bit data; the cast truncates, which
                # matches the original behavior.
                eq_data = clahe.apply(data.astype('uint8'))

                # Output is a single uint8 band, so the copied metadata must
                # say so — the original kept the source dtype/band count,
                # which breaks the write for non-uint8 or multiband sources.
                meta = src.meta.copy()
                meta.update({
                    'width': window.width,
                    'height': window.height,
                    'count': 1,
                    'dtype': 'uint8',
                    'transform': rasterio.windows.transform(window, src.transform)
                })
                out_file = f'square_{row}_{col}.tif'
                # PNG copy for quick visual inspection, GeoTIFF for GIS use.
                Image.fromarray(eq_data).save(out_file.replace('.tif', '.png'))
                with rasterio.open(out_file, 'w', **meta) as out:
                    out.write(eq_data, 1)
import json
import os
import shutil

def get_img(filename, annotations=None):
    """Return the COCO image record whose ``file_name`` equals *filename*.

    Args:
        filename: Image file name to look up.
        annotations: COCO annotation dict with an ``'images'`` list. Defaults
            to the module-level ``coco_anno``, preserving the original call
            style.

    Returns:
        The matching image dict, or None if no record matches.
    """
    if annotations is None:
        annotations = coco_anno
    return next((img for img in annotations['images'] if img['file_name'] == filename), None)

def get_img_ann(image_id, annotations=None):
    """Return all COCO annotations for *image_id*, or None if there are none.

    Args:
        image_id: COCO image id to collect annotations for.
        annotations: COCO annotation dict with an ``'annotations'`` list.
            Defaults to the module-level ``coco_anno``, preserving the
            original call style.

    Returns:
        List of annotation dicts, or None when the image has no annotations
        (kept as None — callers test truthiness).
    """
    if annotations is None:
        annotations = coco_anno
    img_ann = [ann for ann in annotations['annotations'] if ann['image_id'] == image_id]
    return img_ann or None

def coco_to_yolo(x1, y1, w, h, image_w, image_h):
    """Convert a COCO box (top-left x/y, width, height in pixels) into the
    normalized YOLO form [x_center, y_center, width, height]."""
    x_center = (2 * x1 + w) / (2 * image_w)
    y_center = (2 * y1 + h) / (2 * image_h)
    return [x_center, y_center, w / image_w, h / image_h]

coco = 'test_chips/via_project_30Mar2023_18h55m_coco-2.json'
# Load the COCO annotation export once; `with` guarantees the handle closes
# (the original opened/closed it manually).
with open(coco) as coco_json:
    coco_anno = json.load(coco_json)

# Collect every PNG chip that may need a YOLO label file.
file_names = [f for f in os.listdir('test_chips/') if f.endswith('.png')]

for filename in file_names:
    img = get_img(filename)
    if img is None:
        # Chip exists on disk but was never annotated in the COCO export;
        # the original crashed here with a TypeError on img['id'].
        continue

    img_id = img['id']
    img_w = img['width']
    img_h = img['height']

    img_ann = get_img_ann(img_id)

    if img_ann:
        # shutil to move image to a new images folder
        # shutil.copy(source, destination)

        lbl_fn = filename.replace('.png', '.txt')
        # 'a' keeps any labels already written for this chip; the context
        # manager replaces the manual open/close pair.
        with open(f'test_chips/labels/{lbl_fn}', 'a') as txt:
            for ann in img_ann:
                # COCO category ids are 1-based; YOLO expects 0-based.
                category = ann['category_id'] - 1
                x, y, w, h = ann['bbox']

                xc, yc, w, h = coco_to_yolo(x, y, w, h, img_w, img_h)
                txt.write(f"{category} {xc} {yc} {w} {h}\n")
import rasterio
from shapely.geometry import box
import geopandas as gpd

def bbox_to_geospatial(src, xmin, ymin, xmax, ymax):
    """Convert pixel-space bbox corners into a geospatial shapely polygon.

    Args:
        src: Open rasterio dataset providing the affine transform.
        xmin, ymin, xmax, ymax: Pixel coordinates of the box corners
            (x = column, y = row).

    Returns:
        shapely Polygon in the raster's CRS.

    Note:
        rasterio.transform.xy takes (transform, rows, cols) — the ROW comes
        first. The original call passed x as the row and y as the column,
        swapping the coordinates; fixed by passing (ymin, xmin) / (ymax, xmax).
    """
    xmin, ymin = rasterio.transform.xy(src.transform, ymin, xmin, offset='ul')
    xmax, ymax = rasterio.transform.xy(src.transform, ymax, xmax, offset='ul')

    # Create the polygon
    polygon = box(xmin, ymin, xmax, ymax)
    return polygon

def yolo_to_bbox(yolo_coords, image_w, image_h):
    """Convert a normalized YOLO box (x_center, y_center, width, height)
    into absolute pixel corner coordinates [x1, y1, x2, y2]."""
    xc, yc, bw, bh = yolo_coords
    bw = bw * image_w
    bh = bh * image_h
    # Top-left corner: center scaled to pixels, minus half the box size.
    left = ((2 * xc * image_w) - bw) / 2
    top = ((2 * yc * image_h) - bh) / 2
    return [left, top, left + bw, top + bh]

# Load the geospatial metadata
with rasterio.open('square_0_0.tif') as src:
    crs = src.crs
    
    # Load the text file
    # Each line holds one normalized YOLO box: x_center y_center width height.
    with open('coords.txt', 'r') as f:
        coords = [tuple(map(float, line.strip().split())) for line in f]

    # Convert the pixel coordinates to geographic coordinates
    # yolo_to_bbox yields pixel corners [x1, y1, x2, y2], which are passed
    # to bbox_to_geospatial as (xmin, ymin, xmax, ymax).
    geoms = [bbox_to_geospatial(src, *yolo_to_bbox(yolo_coords, src.width, src.height)) for yolo_coords in coords]

# Create a GeoDataFrame with the converted geometries
gdf = gpd.GeoDataFrame(geometry=geoms, crs=crs)

SAR CCDC

S1 Preprocessing for Analysis Ready Data
Border Noise Correction

/* File: border_noise_correction.js
Version: v1.1
Date: 2021-03-11
Authors: Adapted from Hird et al. 2017 Remote Sensing (supplementary material): http://www.mdpi.com/2072-4292/9/12/1315)
Description: This script applies additional border noise correction */

var helper = require('users/benhuffwork/s1_ard:utilities');

//---------------------------------------------------------------------------//
// Additional Border Noise Removal
//---------------------------------------------------------------------------//
/** (mask out angles >= 45.23993) */
/** Mask out pixels whose incidence angle is >= 45.23993 degrees,
 *  preserving the image timestamp. */
var maskAngLT452 = function(image) {
  var angle = image.select(['angle']);
  var masked = image.updateMask(angle.lt(45.23993));
  return masked.set('system:time_start', image.get('system:time_start'));
};

/** Function to mask out edges of images using angle.
 * (mask out angles <= 30.63993) */
/** Function to mask out edges of images using angle.
 *  (mask out angles <= 30.63993, preserving the image timestamp) */
var maskAngGT30 = function(image) {
  var angle = image.select(['angle']);
  var masked = image.updateMask(angle.gt(30.63993));
  return masked.set('system:time_start', image.get('system:time_start'));
};

/** Remove edges.*/
/** Remove edges: mask the image against a byte-scaled copy of its first
 *  band (backscatter scaled from [-25, 5] dB into 0-255). */
var maskEdge = function(image) {
  var byteMask = image.select(0).unitScale(-25, 5).multiply(255).toByte();
  // .connectedComponents(ee.Kernel.rectangle(1,1), 100) was disabled upstream.
  var withMask = image.updateMask(byteMask.select(0));
  return withMask.set('system:time_start', image.get('system:time_start'));
};

/** Mask edges. This function requires that the input image has one VH or VV band, and an 'angle' bands.  */
exports.f_mask_edges = function(image) {
  var db_img = helper.lin_to_db(image)
  var output = maskAngGT30(db_img);
  output = maskAngLT452(output);
  //output = maskEdge(output);
  output = helper.db_to_lin(output);
  return output.set('system:time_start', image.get('system:time_start'));
};

Speckle Filter

/* File: speckle_filter.js
Version: v1.1
Date: 2021-03-11
Authors: Mullissa A., Vollrath A., Braun, C., Slagter B., Balling J., Gou Y., Gorelick N.,  Reiche J.
Description: This script contains functions for implementing both monotemporal and multi-temporal speckle filters */


//---------------------------------------------------------------------------//
// Boxcar filter
//---------------------------------------------------------------------------//
/** Applies boxcar filter on every image in the collection. */
/** Applies a normalized square (boxcar) kernel to every band except
 *  'angle', overwriting the filtered bands in place. */
var boxcar = function(image, KERNEL_SIZE) {
  var bands = image.bandNames().remove('angle');
  // Square kernel of half-size radius, normalized so values sum to 1.
  var kernel = ee.Kernel.square({radius: (KERNEL_SIZE/2), units: 'pixels', normalize: true});
  var smoothed = image.select(bands).convolve(kernel).rename(bands);
  return image.addBands(smoothed, null, true);
};

//---------------------------------------------------------------------------//
// Lee filter 
//---------------------------------------------------------------------------//
/** Lee Filter applied to one image. It is implemented as described in
 J. S. Lee, "Digital image enhancement and noise filtering by use of local statistics,"
 IEEE Pattern Anal. Machine Intell., vol. PAMI-2, pp. 165-168, Mar. 1980.*/
 
// MMSE Lee filter: estimates the noise-free signal from the local
// neighborhood mean/variance of every non-angle band.
var leefilter = function(image,KERNEL_SIZE) {
        var bandNames = image.bandNames().remove('angle');
        //S1-GRD images are multilooked 5 times in range
        var enl = 5
        // Compute the speckle standard deviation
        var eta = 1.0/Math.sqrt(enl); 
        eta = ee.Image.constant(eta);

        // MMSE estimator
        // Neighbourhood mean and variance
        var oneImg = ee.Image.constant(1);

        // One pass computes both mean and variance per band.
        var reducers = ee.Reducer.mean().combine({
                      reducer2: ee.Reducer.variance(),
                      sharedInputs: true
                      });
        var stats = image.select(bandNames).reduceNeighborhood({reducer: reducers,kernel: ee.Kernel.square(KERNEL_SIZE/2,'pixels'), optimization: 'window'})
        // reduceNeighborhood suffixes output bands with the reducer name.
        var meanBand = bandNames.map(function(bandName){return ee.String(bandName).cat('_mean')});
        var varBand = bandNames.map(function(bandName){return ee.String(bandName).cat('_variance')});
        
        var z_bar = stats.select(meanBand);
        var varz = stats.select(varBand);

        // Estimate weight 
        var varx = (varz.subtract(z_bar.pow(2).multiply(eta.pow(2)))).divide(oneImg.add(eta.pow(2)));
        var b = varx.divide(varz);
  
        //if b is negative set it to zero
        var new_b = b.where(b.lt(0), 0)
        // Weighted blend of local mean and the observed pixel value.
        var output = oneImg.subtract(new_b).multiply(z_bar.abs()).add(new_b.multiply(image.select(bandNames)));
        output = output.rename(bandNames);
        return image.addBands(output, null, true);
  }   


//---------------------------------------------------------------------------//
// GAMMA MAP filter 
//---------------------------------------------------------------------------//
/** Gamma Maximum A Posteriori Filter applied to one image. It is implemented as described in
Lopes A., Nezry, E., Touzi, R., and Laur, H., 1990.  Maximum A Posteriori Speckle Filtering and First Order Texture Models in SAR Images.
International  Geoscience  and  Remote  Sensing  Symposium (IGARSS).  */

// Gamma MAP filter: classifies each pixel as homogeneous, textured, or
// strong scatterer via the local coefficient of variation, then applies
// mean / Gamma-MAP / passthrough respectively and merges the three cases.
var gammamap =  function(image,KERNEL_SIZE) { 
        var enl = 5;
        var bandNames = image.bandNames().remove('angle');
        //Neighbourhood stats
        var reducers = ee.Reducer.mean().combine({
                      reducer2: ee.Reducer.stdDev(),
                      sharedInputs: true
                      });
        var stats = image.select(bandNames).reduceNeighborhood({reducer: reducers,kernel: ee.Kernel.square(KERNEL_SIZE/2,'pixels'), optimization: 'window'})
        // reduceNeighborhood suffixes output bands with the reducer name.
        var meanBand = bandNames.map(function(bandName){return ee.String(bandName).cat('_mean')});
        var stdDevBand = bandNames.map(function(bandName){return ee.String(bandName).cat('_stdDev')});
        
        var z = stats.select(meanBand);
        var sigz = stats.select(stdDevBand);
        
        // local observed coefficient of variation
        var ci = sigz.divide(z);
        // noise coefficient of variation (or noise sigma)
        var cu = 1.0/Math.sqrt(enl);
        // threshold for the observed coefficient of variation
        var cmax = Math.sqrt(2.0) * cu
  
        cu = ee.Image.constant(cu);
        cmax = ee.Image.constant(cmax);
        var enlImg = ee.Image.constant(enl);
        var oneImg = ee.Image.constant(1);
        var twoImg = ee.Image.constant(2);
  
        var alpha = oneImg.add(cu.pow(2)).divide(ci.pow(2).subtract(cu.pow(2)));

        //Implements the Gamma MAP filter described in equation 11 in Lopez et al. 1990
        var q = image.select(bandNames).expression("z**2 * (z * alpha - enl - 1)**2 + 4 * alpha * enl * b() * z", {z: z, alpha: alpha,enl: enl})
        var rHat = z.multiply(alpha.subtract(enlImg).subtract(oneImg)).add(q.sqrt()).divide(twoImg.multiply(alpha));
  
        //if ci <= cu then its a homogenous region ->> boxcar filter
        var zHat = (z.updateMask(ci.lte(cu))).rename(bandNames)
        //if cmax > ci > cu then its a textured medium ->> apply Gamma MAP filter
        rHat = (rHat.updateMask(ci.gt(cu)).updateMask(ci.lt(cmax))).rename(bandNames)
        //if ci>=cmax then its strong signal ->> retain
        var x = image.select(bandNames).updateMask(ci.gte(cmax)).rename(bandNames)
  
        // Merge
        // The three masks are mutually exclusive, so summing the masked
        // images reassembles one complete output band.
        var output = ee.ImageCollection([zHat,rHat,x]).sum();
        return image.addBands(output, null, true);
  }   

//---------------------------------------------------------------------------//
// Refined Lee filter 
//---------------------------------------------------------------------------//
/** This filter is modified from the implementation by Guido Lemoine 
 * Source: Lemoine et al.; https://code.earthengine.google.com/5d1ed0a0f0417f098fdfd2fa137c3d0c */

// Refined Lee filter: per band, estimates local gradients from sampled 3x3
// windows inside a 7x7 neighborhood, picks one of 8 edge directions, and
// applies directional MMSE smoothing. Operates on linear (not dB) data.
var refinedLee = function(image) {

    var bandNames = image.bandNames().remove('angle');
  
    var result = ee.ImageCollection(bandNames.map(function(b){
    var img = image.select([b]);
    
    // img must be linear, i.e. not in dB!
    // Set up 3x3 kernels 
    var weights3 = ee.List.repeat(ee.List.repeat(1,3),3);
    var kernel3 = ee.Kernel.fixed(3,3, weights3, 1, 1, false);
  
    var mean3 = img.reduceNeighborhood(ee.Reducer.mean(), kernel3);
    var variance3 = img.reduceNeighborhood(ee.Reducer.variance(), kernel3);
  
    // Use a sample of the 3x3 windows inside a 7x7 windows to determine gradients and directions
    var sample_weights = ee.List([[0,0,0,0,0,0,0], [0,1,0,1,0,1,0],[0,0,0,0,0,0,0], [0,1,0,1,0,1,0], [0,0,0,0,0,0,0], [0,1,0,1,0,1,0],[0,0,0,0,0,0,0]]);
  
    var sample_kernel = ee.Kernel.fixed(7,7, sample_weights, 3,3, false);
  
    // Calculate mean and variance for the sampled windows and store as 9 bands
    var sample_mean = mean3.neighborhoodToBands(sample_kernel); 
    var sample_var = variance3.neighborhoodToBands(sample_kernel);
  
    // Determine the 4 gradients for the sampled windows
    var gradients = sample_mean.select(1).subtract(sample_mean.select(7)).abs();
    gradients = gradients.addBands(sample_mean.select(6).subtract(sample_mean.select(2)).abs());
    gradients = gradients.addBands(sample_mean.select(3).subtract(sample_mean.select(5)).abs());
    gradients = gradients.addBands(sample_mean.select(0).subtract(sample_mean.select(8)).abs());
  
    // And find the maximum gradient amongst gradient bands
    var max_gradient = gradients.reduce(ee.Reducer.max());
  
    // Create a mask for band pixels that are the maximum gradient
    var gradmask = gradients.eq(max_gradient);
  
    // duplicate gradmask bands: each gradient represents 2 directions
    gradmask = gradmask.addBands(gradmask);
  
    // Determine the 8 directions
    // Band 4 of sample_mean is the center window; each comparison decides
    // which side of an edge the center sits on.
    var directions = sample_mean.select(1).subtract(sample_mean.select(4)).gt(sample_mean.select(4).subtract(sample_mean.select(7))).multiply(1);
    directions = directions.addBands(sample_mean.select(6).subtract(sample_mean.select(4)).gt(sample_mean.select(4).subtract(sample_mean.select(2))).multiply(2));
    directions = directions.addBands(sample_mean.select(3).subtract(sample_mean.select(4)).gt(sample_mean.select(4).subtract(sample_mean.select(5))).multiply(3));
    directions = directions.addBands(sample_mean.select(0).subtract(sample_mean.select(4)).gt(sample_mean.select(4).subtract(sample_mean.select(8))).multiply(4));
    // The next 4 are the not() of the previous 4
    directions = directions.addBands(directions.select(0).not().multiply(5));
    directions = directions.addBands(directions.select(1).not().multiply(6));
    directions = directions.addBands(directions.select(2).not().multiply(7));
    directions = directions.addBands(directions.select(3).not().multiply(8));
  
    // Mask all values that are not 1-8
    directions = directions.updateMask(gradmask);
  
    // "collapse" the stack into a singe band image (due to masking, each pixel has just one value (1-8) in it's directional band, and is otherwise masked)
    directions = directions.reduce(ee.Reducer.sum());  
  
    //var pal = ['ffffff','ff0000','ffff00', '00ff00', '00ffff', '0000ff', 'ff00ff', '000000'];
    //Map.addLayer(directions.reduce(ee.Reducer.sum()), {min:1, max:8, palette: pal}, 'Directions', false);
  
    // Squared coefficient of variation per sampled window.
    var sample_stats = sample_var.divide(sample_mean.multiply(sample_mean));
  
    // Calculate localNoiseVariance
    // Mean of the 5 smallest window statistics.
    var sigmaV = sample_stats.toArray().arraySort().arraySlice(0,0,5).arrayReduce(ee.Reducer.mean(), [0]);
  
    // Set up the 7*7 kernels for directional statistics
    var rect_weights = ee.List.repeat(ee.List.repeat(0,7),3).cat(ee.List.repeat(ee.List.repeat(1,7),4));
  
    var diag_weights = ee.List([[1,0,0,0,0,0,0], [1,1,0,0,0,0,0], [1,1,1,0,0,0,0], 
      [1,1,1,1,0,0,0], [1,1,1,1,1,0,0], [1,1,1,1,1,1,0], [1,1,1,1,1,1,1]]);
  
    var rect_kernel = ee.Kernel.fixed(7,7, rect_weights, 3, 3, false);
    var diag_kernel = ee.Kernel.fixed(7,7, diag_weights, 3, 3, false);
  
    // Create stacks for mean and variance using the original kernels. Mask with relevant direction.
    var dir_mean = img.reduceNeighborhood(ee.Reducer.mean(), rect_kernel).updateMask(directions.eq(1));
    var dir_var = img.reduceNeighborhood(ee.Reducer.variance(), rect_kernel).updateMask(directions.eq(1));
  
    dir_mean = dir_mean.addBands(img.reduceNeighborhood(ee.Reducer.mean(), diag_kernel).updateMask(directions.eq(2)));
    dir_var = dir_var.addBands(img.reduceNeighborhood(ee.Reducer.variance(), diag_kernel).updateMask(directions.eq(2)));
  
    // and add the bands for rotated kernels
    // Rotating each kernel i quarter-turns covers the remaining 6 directions.
    for (var i=1; i<4; i++) {
      dir_mean = dir_mean.addBands(img.reduceNeighborhood(ee.Reducer.mean(), rect_kernel.rotate(i)).updateMask(directions.eq(2*i+1)));
      dir_var = dir_var.addBands(img.reduceNeighborhood(ee.Reducer.variance(), rect_kernel.rotate(i)).updateMask(directions.eq(2*i+1)));
      dir_mean = dir_mean.addBands(img.reduceNeighborhood(ee.Reducer.mean(), diag_kernel.rotate(i)).updateMask(directions.eq(2*i+2)));
      dir_var = dir_var.addBands(img.reduceNeighborhood(ee.Reducer.variance(), diag_kernel.rotate(i)).updateMask(directions.eq(2*i+2)));
    }
  
    // "collapse" the stack into a single band image (due to masking, each pixel has just one value in it's directional band, and is otherwise masked)
    dir_mean = dir_mean.reduce(ee.Reducer.sum());
    dir_var = dir_var.reduce(ee.Reducer.sum());
  
    // A finally generate the filtered value
    var varX = dir_var.subtract(dir_mean.multiply(dir_mean).multiply(sigmaV)).divide(sigmaV.add(1.0));
  
    var b = varX.divide(dir_var);
  
    // MMSE blend of directional mean and observed pixel.
    return dir_mean.add(b.multiply(img.subtract(dir_mean)))
      .arrayProject([0])
      // Get a multi-band image bands.
      .arrayFlatten([['sum']])
      .float();
  })).toBands().rename(bandNames).copyProperties(image);
  return image.addBands(result, null, true) 
  } 
//---------------------------------------------------------------------------//
// Improved Lee Sigma filter 
//---------------------------------------------------------------------------//
/** Implements the improved Lee sigma filter for one image.
It is implemented as described in, Lee, J.-S.; Wen, J.-H.; Ainsworth, T.L.; Chen, K.-S.; Chen, A.J. Improved sigma filter for speckle filtering of SAR imagery.
IEEE Trans. Geosci. Remote Sens. 2009, 47, 202-213. */

// Improved Lee sigma filter: retains strong scatterers, restricts the MMSE
// estimate to pixels inside a sigma intensity range from a lookup table.
var leesigma = function(image,KERNEL_SIZE) {
        //parameters
        var Tk = ee.Image.constant(7); //number of bright pixels in a 3x3 window
        var sigma = 0.9;
        var enl = 4;
        var target_kernel = 3;
        var bandNames = image.bandNames().remove('angle');
  
        //compute the 98 percentile intensity 
        var z98 = image.select(bandNames).reduceRegion({
                reducer: ee.Reducer.percentile([98]),
                geometry: image.geometry(),
                scale:10,
                maxPixels:1e13
            }).toImage();
            
        //select the strong scatterers to retain
        // A pixel is retained if at least Tk of its 3x3 neighbors are bright.
        var brightPixel = image.select(bandNames).gte(z98);
        var K = brightPixel.reduceNeighborhood({reducer: ee.Reducer.countDistinctNonNull()
                            ,kernel: ee.Kernel.square((target_kernel/2) ,'pixels')}); 
        var retainPixel = K.gte(Tk);
  
  
        //compute the a-priori mean within a 3x3 local window
        //original noise standard deviation
        var eta = 1.0/Math.sqrt(enl);
        eta = ee.Image.constant(eta);
        //MMSE applied to estimate the a-priori mean
        var reducers = ee.Reducer.mean().combine({
                      reducer2: ee.Reducer.variance(),
                      sharedInputs: true
                      });
        var stats = image.select(bandNames).reduceNeighborhood({reducer: reducers,kernel: ee.Kernel.square(target_kernel/2,'pixels'), optimization: 'window'})
        // reduceNeighborhood suffixes output bands with the reducer name.
        var meanBand = bandNames.map(function(bandName){return ee.String(bandName).cat('_mean')});
        var varBand = bandNames.map(function(bandName){return ee.String(bandName).cat('_variance')});
        var z_bar = stats.select(meanBand);
        var varz = stats.select(varBand);
        
        var oneImg = ee.Image.constant(1);
        var varx = (varz.subtract(z_bar.abs().pow(2).multiply(eta.pow(2)))).divide(oneImg.add(eta.pow(2)));
        var b = varx.divide(varz);
        var xTilde = oneImg.subtract(b).multiply(z_bar.abs()).add(b.multiply(image.select(bandNames)));
  
        //step 3: compute the sigma range
        // Lookup table (J.S.Lee et al 2009) for range and eta values for intensity (only 4 look is shown here)
        var LUT = ee.Dictionary({0.5: ee.Dictionary({'I1': 0.694,'I2': 1.385,'eta': 0.1921}),
                                 0.6: ee.Dictionary({'I1': 0.630,'I2': 1.495,'eta': 0.2348}),
                                 0.7: ee.Dictionary({'I1': 0.560,'I2': 1.627,'eta': 0.2825}),
                                 0.8: ee.Dictionary({'I1': 0.480,'I2': 1.804,'eta': 0.3354}),
                                 0.9: ee.Dictionary({'I1': 0.378,'I2': 2.094,'eta': 0.3991}),
                                 0.95: ee.Dictionary({'I1': 0.302,'I2': 2.360,'eta': 0.4391})});
  
        // extract data from lookup
        var sigmaImage = ee.Dictionary(LUT.get(String(sigma))).toImage();
        var I1 = sigmaImage.select('I1');
        var I2 = sigmaImage.select('I2');
        //new speckle sigma
        var nEta = sigmaImage.select('eta');
        //establish the sigma ranges
        // The table factors scale the a-priori mean into per-pixel bounds.
        I1 = I1.multiply(xTilde);
        I2 = I2.multiply(xTilde);
  
        //step 3: apply the minimum mean square error (MMSE) filter for pixels in the sigma range
        // MMSE estimator
        var mask = image.select(bandNames).gte(I1).or(image.select(bandNames).lte(I2));
        var z = image.select(bandNames).updateMask(mask);

        stats = z.reduceNeighborhood({reducer: reducers,kernel: ee.Kernel.square(KERNEL_SIZE/2,'pixels'), optimization: 'window'})

        z_bar = stats.select(meanBand);
        varz = stats.select(varBand);
        
        varx = (varz.subtract(z_bar.abs().pow(2).multiply(nEta.pow(2)))).divide(oneImg.add(nEta.pow(2)));
        b = varx.divide(varz);
        //if b is negative set it to zero
        var new_b = b.where(b.lt(0), 0);
        var xHat = oneImg.subtract(new_b).multiply(z_bar.abs()).add(new_b.multiply(z));
  
        // remove the applied masks and merge the retained pixels and the filtered pixels
        xHat = image.select(bandNames).updateMask(retainPixel).unmask(xHat);
        var output = ee.Image(xHat).rename(bandNames);
  return image.addBands(output, null, true);
} 

//---------------------------------------------------------------------------//
// 4. Mono-temporal speckle filter 
//---------------------------------------------------------------------------//

/** Mono-temporal speckle Filter   */
exports.MonoTemporal_Filter = function(coll,KERNEL_SIZE, SPECKLE_FILTER) {

  var _filter = function(image) {
    
      if (SPECKLE_FILTER=='BOXCAR'){
      var _filtered = boxcar(image, KERNEL_SIZE);
    } else if (SPECKLE_FILTER=='LEE'){
        _filtered = leefilter(image, KERNEL_SIZE);
    } else if (SPECKLE_FILTER=='GAMMA MAP'){
        _filtered = gammamap(image, KERNEL_SIZE);
    } else if (SPECKLE_FILTER=='REFINED LEE'){
        _filtered = refinedLee(image);
    } else if (SPECKLE_FILTER=='LEE SIGMA'){
        _filtered = leesigma(image, KERNEL_SIZE);}
  return _filtered;
  }
  return coll.map(_filter);
}
//---------------------------------------------------------------------------//
// 5. Multi-temporal speckle filter
//---------------------------------------------------------------------------//
/* The following Multi-temporal speckle filters are implemented as described in
S. Quegan and J. J. Yu, "Filtering of multichannel SAR images," 
IEEE Trans Geosci. Remote Sensing, vol. 39, Nov. 2001.*/

/** Multi-temporal speckle Filter (supports all mono-temporal filters as the estimator). */
exports.MultiTemporal_Filter = function(coll,KERNEL_SIZE, SPECKLE_FILTER,NR_OF_IMAGES) {
  
// Quegan & Yu multi-temporal filter applied to one image:
// builds a stack of NR_OF_IMAGES fully-overlapping acquisitions from the same
// relative orbit, then combines per-image local means and ratios (see below).
var Quegan = function(image) {
  
    /* this function will filter the collection used for the multi-temporal part
     it takes care of:
        - same image geometry (i.e relative orbit)
        - full overlap of image
        - amount of images taken for filtering 
            -- all before
           -- if not enough, images taken after the image to filter are added */
    // use the image's default resampling for every candidate acquisition
    var setresample = function (image){
        return image.resample();
    };
    
    var get_filtered_collection = function (image){
      
      
      // filter collection over are and by relative orbit
      var s1_coll = ee.ImageCollection('COPERNICUS/S1_GRD_FLOAT')
                .filterBounds(image.geometry())
                .filter(ee.Filter.eq('instrumentMode', 'IW'))
                .filter(ee.Filter.listContains('transmitterReceiverPolarisation', ee.List(image.get('transmitterReceiverPolarisation')).get(-1)))
                // we need to get this from the image
                //.filter(ee.Filter.and(ee.Filter.eq('transmitterReceiverPolarisation', 'VH'),ee.Filter.eq('transmitterReceiverPolarisation', 'VH')) )
                // we filter for both because of potential change in orbit number around the Equator
                .filter(ee.Filter.or(
                    ee.Filter.eq('relativeOrbitNumber_stop', image.get('relativeOrbitNumber_stop')),
                    ee.Filter.eq('relativeOrbitNumber_stop', image.get('relativeOrbitNumber_start'))
                )).map(setresample)
      
      // a function that takes the image and checks for the overlap
      var check_overlap = function(_image){
          // get all S1 frames from this date intersecting with the image bounds
          var s1 = s1_coll.filterDate(_image.date(), _image.date().advance(1, 'day'))
          // intersect those images with the image to filter
          var intersect = image.geometry().intersection(s1.geometry().dissolve(), 10)
          // check if intersect is sufficient
          // a date only qualifies when >95% of the target footprint is covered;
          // otherwise valid_date is not set and the entry is dropped later
          var valid_date = ee.Algorithms.If(
            intersect.area(10).divide(image.geometry().area(10)).gt(0.95),
            _image.date().format('YYYY-MM-dd')
          )
          return ee.Feature(null, {'date': valid_date})
      }
      
      
      // this function will pick up the acq dates for fully overlapping acquisitions before the image acquistion
      var dates_before = s1_coll.filterDate('2014-01-01', image.date().advance(1, 'day'))
        .sort('system:time_start', false).limit(5*NR_OF_IMAGES)
        // we check for overlap and sort out partly overlping acquisitions
        .map(check_overlap).distinct('date').aggregate_array('date');
    
      // if the images before are not enough, we add images from after the image acquisition 
      // this will only be the case at the beginning of S1 mission
      var dates = ee.List(
        ee.Algorithms.If(
          dates_before.size().gte(NR_OF_IMAGES),
          dates_before.slice(0, NR_OF_IMAGES),
          s1_coll
            .filterDate(image.date(), '2100-01-01')
            .sort('system:time_start', true).limit(5*NR_OF_IMAGES)
            .map(check_overlap)
            .distinct('date')
            .aggregate_array('date')
            .cat(dates_before).distinct().sort().slice(0, NR_OF_IMAGES)
        )
      )
    
      // now we re-filter the collection to get the right acquisitions for multi-temporal filtering
      return ee.ImageCollection(dates.map(function(date){
        return s1_coll.filterDate(date, ee.Date(date).advance(1,'day')).toList(s1_coll.size())
      }).flatten())
      
    }
          
  
  // we get our dedicated image collection for that image
  var s1 = get_filtered_collection(image)
  
  var bands = image.bandNames().remove('angle');
  s1 = s1.select(bands)
  var meanBands = bands.map(function(bandName){return ee.String(bandName).cat('_mean')});
  var ratioBands = bands.map(function(bandName){return ee.String(bandName).cat('_ratio')});
  // per-pixel number of valid observations in the stack (the N below)
  var count_img = s1.reduce(ee.Reducer.count());
  //estimate means and ratios
  // inner() returns the mono-temporally filtered image (local mean E[x],
  // bands renamed *_mean) plus the ratio image x/E[x] (bands renamed *_ratio)
  var inner = function(image){
    if (SPECKLE_FILTER=='BOXCAR'){
      var _filtered = boxcar(image, KERNEL_SIZE).select(bands).rename(meanBands); }
    else if (SPECKLE_FILTER=='LEE'){
      _filtered = leefilter(image, KERNEL_SIZE).select(bands).rename(meanBands);}
    else if (SPECKLE_FILTER=='GAMMA MAP'){
      _filtered = gammamap(image, KERNEL_SIZE).select(bands).rename(meanBands);} 
    else if (SPECKLE_FILTER=='REFINED LEE'){
      _filtered = refinedLee(image).select(bands).rename(meanBands);} 
    else if (SPECKLE_FILTER=='LEE SIGMA'){
      _filtered = leesigma(image, KERNEL_SIZE).select(bands).rename(meanBands);}
   
    var _ratio = image.select(bands).divide(_filtered).rename(ratioBands); 
  return _filtered.addBands(_ratio);
  }
  //perform Quegans filter
  // output = E[x] * (1/N) * sum_i( x_i / E[x_i] ), assembled from the pieces below
  var isum = s1.map(inner).select(ratioBands).reduce(ee.Reducer.sum());
  var filter = inner(image).select(meanBands);
  var divide = filter.divide(count_img);
  var output = divide.multiply(isum).rename(bands);

  // replace the original backscatter bands with the filtered ones ('angle' kept)
  return image.addBands(output, null, true)
  }
  return coll.map(Quegan);

};

Radiometric Terrain Correction

//---------------------------------------------------------------------------//
// Terrain Flattening
//---------------------------------------------------------------------------//
/* Vollrath, A., Mullissa, A., & Reiche, J. (2020). Angular-Based Radiometric Slope Correction for Sentinel-1 on Google Earth Engine. 
Remote Sensing, 12(11), [1867]. https://doi.org/10.3390/rs12111867
*/ 
exports.slope_correction = function(collection, TERRAIN_FLATTENING_MODEL, 
                                              DEM,
                                              TERRAIN_FLATTENING_ADDITIONAL_LAYOVER_SHADOW_BUFFER) {

  // 90 degrees in radians, reused by the geometry equations below
  var ninetyRad = ee.Image.constant(90).multiply(Math.PI/180);

  // Scattering correction factor for the volume-scattering model
  var _volumetric_model_SCF = function(theta_iRad, alpha_rRad) {
      // Volume model
      var nominator = (ninetyRad.subtract(theta_iRad).add(alpha_rRad)).tan();
      var denominator = (ninetyRad.subtract(theta_iRad)).tan();
      return nominator.divide(denominator);
  }

  // Scattering correction factor for the direct (surface) model
  var _direct_model_SCF = function(theta_iRad, alpha_rRad, alpha_azRad) {
      // Surface model
      var nominator = (ninetyRad.subtract(theta_iRad)).cos();
      var denominator = alpha_azRad.cos()
        .multiply((ninetyRad.subtract(theta_iRad).add(alpha_rRad)).cos());
      return nominator.divide(denominator);
  }
  
  var _erode = function(image, distance)  {
     // buffer function (thanks Noel)
      // distance (metres) to the nearest masked pixel via a fast distance
      // transform at 30 m; pixels closer than `distance` are also masked
      var d = (image.not().unmask(1)
          .fastDistanceTransform(30).sqrt()
          .multiply(ee.Image.pixelArea().sqrt()));

      return image.updateMask(d.gt(distance));
    }
  
  var _masking = function(alpha_rRad, theta_iRad, buffer){
        // calculate masks
        // layover, where slope > radar viewing angle
        var layover = alpha_rRad.lt(theta_iRad).rename('layover');
        // shadow
        var shadow = alpha_rRad.gt(ee.Image.constant(-1).multiply(ninetyRad.subtract(theta_iRad))).rename('shadow');
        // combine layover and shadow
        var mask = layover.and(shadow);
        // add buffer to final mask
        if (buffer > 0)
            mask = _erode(mask, buffer);
        return mask.rename('no_data_mask');
   }

  // Per-image terrain flattening; mapped over the collection at the bottom.
  var _correct = function(image) {
        var bandNames = image.bandNames();
        // get the image geometry and projection
        var geom = image.geometry()
        var proj = image.select(1).projection()
        
        var elevation = DEM.resample('bilinear').reproject({crs:proj, scale:10}).clip(geom)
        
        // calculate the look direction
        var heading = (ee.Terrain.aspect(image.select('angle'))
                                     .reduceRegion(ee.Reducer.mean(),image.geometry(),1000))
        
        // in case of null values for heading replace with 0
        heading = ee.Dictionary(heading).combine({aspect: 0}, false).get('aspect')
    
        // wrap heading into the (-180, 180] range
        heading = ee.Algorithms.If(
            ee.Number(heading).gt(180),
            ee.Number(heading).subtract(360),
            ee.Number(heading)
        )
        // the numbering follows the article chapters
        // 2.1.1 Radar geometry 
        var theta_iRad = image.select('angle').multiply(Math.PI/180)
        var phi_iRad = ee.Image.constant(heading).multiply(Math.PI/180)
        
        // 2.1.2 Terrain geometry
        //slope 
        var alpha_sRad = ee.Terrain.slope(elevation).select('slope').multiply(Math.PI / 180)

        // aspect (-180 to 180)
        var aspect = ee.Terrain.aspect(elevation).select('aspect').clip(geom)

        // we need to subtract 360 degree from all values above 180 degree
        var aspect_minus = aspect
          .updateMask(aspect.gt(180))
          .subtract(360)

        // we fill the aspect layer with the subtracted values from aspect_minus
        var phi_sRad = aspect
          .updateMask(aspect.lte(180))
          .unmask() 
          .add(aspect_minus.unmask()) //add the minus values
          .multiply(-1)   // make aspect uphill
          .multiply(Math.PI / 180) // make it rad
        
        // we get the height, for export 
        // NOTE(review): 'height' is computed but not used anywhere below
        var height = DEM.reproject(proj).clip(geom)
        
        
        // 2.1.3 Model geometry
        //reduce to 3 angle
        var phi_rRad = phi_iRad.subtract(phi_sRad)

        // slope steepness in range (eq. 2)
        var alpha_rRad = (alpha_sRad.tan().multiply(phi_rRad.cos())).atan()

        // slope steepness in azimuth (eq 3)
        var alpha_azRad = (alpha_sRad.tan().multiply(phi_rRad.sin())).atan()

        // local incidence angle (eq. 4)
        var theta_liaRad = (alpha_azRad.cos().multiply((theta_iRad.subtract(alpha_rRad)).cos())).acos()
        var theta_liaDeg = theta_liaRad.multiply(180/Math.PI)

        // 2.2 
        // Gamma_nought
        var gamma0 = image.divide(theta_iRad.cos())

        if (TERRAIN_FLATTENING_MODEL == 'VOLUME') {
            // Volumetric Model
            var scf = _volumetric_model_SCF(theta_iRad, alpha_rRad)
        }
        
        if (TERRAIN_FLATTENING_MODEL == 'DIRECT') {
            var scf = _direct_model_SCF(theta_iRad, alpha_rRad, alpha_azRad)
        }
        // NOTE(review): if TERRAIN_FLATTENING_MODEL is neither 'VOLUME' nor
        // 'DIRECT', scf stays undefined and the multiply below will fail.
        // apply model for Gamm0
        var gamma0_flat = gamma0.multiply(scf)

        // get Layover/Shadow mask
        var mask = _masking(alpha_rRad, theta_iRad, TERRAIN_FLATTENING_ADDITIONAL_LAYOVER_SHADOW_BUFFER);

        var output = gamma0_flat.mask(mask).rename(bandNames).copyProperties(image);
        
        // re-attach the original 'angle' band (flattening only applies to backscatter)
        output = ee.Image(output).addBands(image.select('angle'),null,true);
        
        return output.set('system:time_start', image.get('system:time_start')); 
  }   
  return collection.map(_correct)
}

Utilities

// File: utilities.js
// Version: v1.2
// Date: 2021-02-11
// Authors: Mullissa A., Vollrath A.,  Reiche J., Slagter B., Balling J. , Gou Y., Braun, C.

//---------------------------------------------------------------------------//
// Linear to db scale 
//---------------------------------------------------------------------------//
  
/** Convert backscatter from linear to dB. */
exports.lin_to_db = function(image) {
  var bandNames = image.bandNames().remove('angle');
  var db = ee.Image.constant(10).multiply(image.select(bandNames).log10()).rename(bandNames)
  return image.addBands(db, null, true)
};

/** Convert backscatter from linear to dB. */
exports.db_to_lin = function(image) {
  var bandNames = image.bandNames().remove('angle');
  var lin = ee.Image.constant(10).pow(image.select(bandNames).divide(10)).rename(bandNames)
  return image.addBands(lin, null, true)
};

/*Converts the linear image to db by excluding the ratio bands */
exports.lin_to_db2 = function(image) {
  var db = ee.Image.constant(10).multiply(image.select(['VV', 'VH']).log10()).rename(['VV', 'VH']);
  return image.addBands(db, null, true)
}

//---------------------------------------------------------------------------//
// Prepare ratio band for linear image
//---------------------------------------------------------------------------//
exports.add_ratio_lin = function(image){
      var ratio = image.addBands(image.select('VV').divide(image.select('VH')).rename('VVVH_ratio'));
      return ratio.set('system:time_start', image.get('system:time_start'))
  }


//---------------------------------------------------------------------------//
// Export assets
//---------------------------------------------------------------------------//
/** Merge user-supplied options over a defaults object.
 * Returns a new object containing every key of `def` plus every defined key
 * of `options` (user values win). BUGFIX: the previous version assigned
 * `opt = options` and wrote the defaults into it, mutating the caller's
 * options object as a side effect; we now build a fresh object instead.
 * @param {Object} def - default key/value pairs.
 * @param {Object} [options] - user overrides; may be undefined.
 * @return {Object} merged options (never the caller's object).
 */
var get_options = function(def, options) {
  var opt = {}
  // start from the defaults
  for (var key in def) {
    opt[key] = def[key]
  }
  // overlay user values, skipping explicitly-undefined entries so the
  // corresponding default survives (same observable behavior as before)
  if (options !== undefined) {
    for (var k in options) {
      if (options[k] !== undefined) {opt[k] = options[k]}
    }
  }
  return opt
}

/*exports individual images to an asset. 
adopted from https://github.com/fitoprincipe/geetools-code-editor */
var Download = {'ImageCollection': {}, 'Table': {}}

/** Queue an Export.image.toAsset task for every image of a collection.
 * @param {ee.ImageCollection} collection - images to export.
 * @param {string|null} assetFolder - folder name under the user's asset root;
 *        null/undefined exports directly into the root.
 * @param {Object} [options] - {name, scale, maxPixels, region}; missing keys
 *        fall back to the defaults below.
 */
Download.ImageCollection.toAsset = function(collection, assetFolder, options) {
  var root = ee.data.getAssetRoots()[0]['id']
  var folder = assetFolder
  // resolve the destination folder under the user's asset root
  if (folder !== null && folder !== undefined) {
    var assetFolder = root+'/'+folder ;
  } else {
    var assetFolder = root
  }
  var defaults = {
      name: null,
      scale: 1000,
      maxPixels: 1e13,
      region: null,
    }
  var opt = get_options(defaults, options)
  // getInfo() forces client-side evaluation so we can loop over the images
  var n = collection.size().getInfo();
    
  var colList = collection.toList(n);
  
  var colID = opt.name || collection.getInfo()['id'] || "ImageCollection"
  colID = colID.replace('/','_')
  
  for (var i = 0; i < n; i++) {
    var img = ee.Image(colList.get(i));
    var id = img.id().getInfo() || 'image_'+i.toString();
    // export region defaults to the image footprint when not supplied
    var region = opt.region || img.geometry().bounds().getInfo()["coordinates"];
    
    Export.image.toAsset({
      image: img,
      description: id,
      // BUGFIX: a '/' separator was missing between the folder and the asset
      // name, producing malformed ids like 'users/me/folderImageCollection_img'
      assetId: assetFolder+'/'+colID+'_'+id,
      region: region,
      scale: opt.scale,
      maxPixels: opt.maxPixels})
  }
}

exports.Download = Download;

S1 CCDC

// make two identical s1 collections, one dB and one Linear
// (the FLOAT collection keeps linear power, needed for the preprocessing
// chain below; geometry2 is an AOI drawn in the Code Editor)
var grdFloat = ee.ImageCollection('COPERNICUS/S1_GRD_FLOAT')
        .filter(ee.Filter.eq('instrumentMode', 'IW'))
        .filter(ee.Filter.eq('resolution_meters', 10))
        .filter(ee.Filter.date('2014-10-01', '2023-04-01'))
        .filter(ee.Filter.bounds(geometry2))
        .select(['VV', 'angle']);

// dB collection; pixels below -30 dB are masked as scene-edge noise
var grd = ee.ImageCollection('COPERNICUS/S1_GRD')
        .filter(ee.Filter.eq('instrumentMode', 'IW'))
        .filter(ee.Filter.eq('resolution_meters', 10))
        .filter(ee.Filter.date('2014-10-01', '2023-04-01'))
        .filter(ee.Filter.bounds(geometry2))
        .map(function(image) {
          var edge = image.lt(-30.0);
          var maskedImage = image.mask().and(edge.not());
          return image.updateMask(maskedImage);
        })
        .select(['VV', 'angle']);

// filter based on pass
var desc = grd.filter(ee.Filter.eq('orbitProperties_pass', 'DESCENDING'));
var asc = grd.filter(ee.Filter.eq('orbitProperties_pass', 'ASCENDING'));
print('ASCENDING pass contains ', asc.size(), 'images');
print('DESCENDING pass contains ', desc.size(), 'images');

// determine which pass has the most observations
var flag = ee.Number(desc.size()).gt(ee.Number(asc.size()));  // if you want the opposite pass for some reason just change gt to lt
var s = ee.Algorithms.If(flag, "DESCENDING", "ASCENDING");
print(s, 'is the most complete pass.');

// make a collection with that pass
var s1_pass = grd.filter(ee.Filter.eq('orbitProperties_pass', s));

// get the orbits in that pass
var orbits = s1_pass.aggregate_array('relativeOrbitNumber_start').distinct();
print('Orbits:', orbits);

// for each orbit, make a feature collection with the image footprints
var orbitFeatureCollections = orbits.map(function(n) {
  var s1_pass_orbit = s1_pass.filter(ee.Filter.eq('relativeOrbitNumber_start', n));
  var s1_pass_orbit_footprints = ee.FeatureCollection(s1_pass_orbit.aggregate_array('system:footprint').map(function(f) {
    return ee.Feature(ee.Geometry(f));
  }));
  return s1_pass_orbit_footprints;
});

Map.centerObject(geometry2, 16);

// UPDATE: need to change the next lines depending on the number of orbits
// (everything below assumes exactly two orbits cover the AOI)
Map.addLayer(ee.FeatureCollection(orbitFeatureCollections.get(0)), {color:'blue'}, ee.Number(orbits.get(0)).int().format().getInfo(), false);
Map.addLayer(ee.FeatureCollection(orbitFeatureCollections.get(1)), {color:'red'}, ee.Number(orbits.get(1)).int().format().getInfo(), false);

// now get the image collections for these orbits
// UPDATE: need to change the next lines depending on the number of orbits
var s1_pass_orbit0 = s1_pass.filter(ee.Filter.eq('relativeOrbitNumber_start', orbits.get(0)));
var s1_pass_orbit1 = s1_pass.filter(ee.Filter.eq('relativeOrbitNumber_start', orbits.get(1)));

// UPDATE: print some information about these orbital collections
// get the number of images.
print('Orbit #: ', orbits.get(0));
var s1_pass_orbit0_count = s1_pass_orbit0.size();
print('Count: ', s1_pass_orbit0_count);
// get the date range of images in the collection.
var s1_pass_orbit0_range = s1_pass_orbit0.reduceColumns(ee.Reducer.minMax(), ['system:time_start']);
print('Date range: ', ee.Date(s1_pass_orbit0_range.get('min')), ee.Date(s1_pass_orbit0_range.get('max')));

// get the number of images.
print('Orbit #: ', orbits.get(1));
var s1_pass_orbit1_count = s1_pass_orbit1.size();
print('Count: ', s1_pass_orbit1_count);
// get the date range of images in the collection.
var s1_pass_orbit1_range = s1_pass_orbit1.reduceColumns(ee.Reducer.minMax(), ['system:time_start']);
print('Date range: ', ee.Date(s1_pass_orbit1_range.get('min')), ee.Date(s1_pass_orbit1_range.get('max')));

// add median composites of the orbits to the map for reference
// NOTE(review): despite the comment above, this adds the *first* image of
// each orbit (.first()), not a median composite.
Map.addLayer(s1_pass_orbit0.first().select('VV'), {min:-20, max:0, palette: ['#023858', '#045d92', '#1379b5', '#519bc6', '#91b4d6', '#c4cbe3', '#e8e4f0', '#fff7fb']}, ee.String('Raw Orbit ').cat(ee.Number(orbits.get(0)).int().format()).getInfo(), false);
Map.addLayer(s1_pass_orbit1.first().select('VV'), {min:-20, max:0, palette: ['#023858', '#045d92', '#1379b5', '#519bc6', '#91b4d6', '#c4cbe3', '#e8e4f0', '#fff7fb']}, ee.String('Raw Orbit ').cat(ee.Number(orbits.get(1)).int().format()).getInfo(), false);

// border noise correction, speckle filtering, and terrain correction on each collection
// (preprocessing runs on the linear FLOAT collections, then converts to dB)
var speckle_filter = require('users/benhuffwork/s1_ard:speckle_filter');
var terrain_flattening = require('users/benhuffwork/s1_ard:terrain_flattening');
var border_noise_correction = require('users/benhuffwork/s1_ard:border_noise_correction');

var s1_float_pass = grdFloat.filter(ee.Filter.eq('orbitProperties_pass', s));

var s1_float_pass_orbit0 = s1_float_pass.filter(ee.Filter.eq('relativeOrbitNumber_start', orbits.get(0)));
var s1_float_pass_orbit1 = s1_float_pass.filter(ee.Filter.eq('relativeOrbitNumber_start', orbits.get(1)));

// border noise correction
var s1_float_pass_orbit0_bnc = s1_float_pass_orbit0.map(border_noise_correction.f_mask_edges); 
var s1_float_pass_orbit1_bnc = s1_float_pass_orbit1.map(border_noise_correction.f_mask_edges); 
print('Border noise correction completed...');

// mono-temporal speckle filtering (Refined Lee, 5-pixel kernel argument)
var s1_float_pass_orbit0_bnc_sf = ee.ImageCollection(speckle_filter.MonoTemporal_Filter(s1_float_pass_orbit0_bnc, 5, 'REFINED LEE'));
var s1_float_pass_orbit1_bnc_sf = ee.ImageCollection(speckle_filter.MonoTemporal_Filter(s1_float_pass_orbit1_bnc, 5, 'REFINED LEE'));
print('Mono-temporal speckle filtering completed...');

// radiometric terrain correction (volume model, SRTM DEM, no extra buffer)
var dem = ee.Image('USGS/SRTMGL1_003');
var s1_float_pass_orbit0_bnc_sf_rtc = ee.ImageCollection(terrain_flattening.slope_correction(s1_float_pass_orbit0_bnc_sf, 'VOLUME', dem, 0));
var s1_float_pass_orbit1_bnc_sf_rtc = ee.ImageCollection(terrain_flattening.slope_correction(s1_float_pass_orbit1_bnc_sf, 'VOLUME', dem, 0));
print('Radiometric terrain correction completed...');

// convert the linear images to dB (10 * log10, 'angle' band preserved)
var s1_float_pass_orbit0_bnc_sf_rtc_db = s1_float_pass_orbit0_bnc_sf_rtc.map(function(image) {
  var bandNames = image.bandNames().remove('angle');
  var db = ee.Image.constant(10).multiply(image.select(bandNames).log10()).rename(bandNames);
  return image.addBands(db, null, true);
});

var s1_float_pass_orbit1_bnc_sf_rtc_db = s1_float_pass_orbit1_bnc_sf_rtc.map(function(image) {
  var bandNames = image.bandNames().remove('angle');
  var db = ee.Image.constant(10).multiply(image.select(bandNames).log10()).rename(bandNames);
  return image.addBands(db, null, true);
});

// clip to aoi
var s1_float_pass_orbit0_bnc_sf_rtc_db_clip = s1_float_pass_orbit0_bnc_sf_rtc_db.map(function(image) {
  return image.clip(geometry2);
});

var s1_float_pass_orbit1_bnc_sf_rtc_db_clip = s1_float_pass_orbit1_bnc_sf_rtc_db.map(function(image) {
  return image.clip(geometry2);
});

// add median composites of the preprocessed orbits to the map for reference
// NOTE(review): as above, this shows the first image of each orbit, not a median.
Map.addLayer(s1_float_pass_orbit0_bnc_sf_rtc_db.first().select('VV'), {min:-20, max:0, palette: ['#023858', '#045d92', '#1379b5', '#519bc6', '#91b4d6', '#c4cbe3', '#e8e4f0', '#fff7fb']}, ee.String('Analysis Ready Orbit ').cat(ee.Number(orbits.get(0)).int().format()).getInfo(), false);
Map.addLayer(s1_float_pass_orbit1_bnc_sf_rtc_db.first().select('VV'), {min:-20, max:0, palette: ['#023858', '#045d92', '#1379b5', '#519bc6', '#91b4d6', '#c4cbe3', '#e8e4f0', '#fff7fb']}, ee.String('Analysis Ready Orbit ').cat(ee.Number(orbits.get(1)).int().format()).getInfo(), false);


// chart the observations at a single point in both orbits
// ('geometry' is a point drawn in the Code Editor; 'chart'/'chartStyle' are
// deliberately re-declared for each of the four charts below)
var chart = ui.Chart.image.series(s1_pass_orbit0.map(function(image){return image.clip(geometry2)}).select('VV'), geometry);
var chartStyle = {
  title: ee.String('Raw Orbit ').cat(ee.Number(orbits.get(0)).int().format()).getInfo(),
  series: {
    0: {lineWidth: 0, color: 'orange', pointSize: 1},
    1: {lineWidth: 0, color: 'green', pointSize: 1},
    2: {lineWidth: 0, color: 'blue', pointSize: 1}
  }
};
chart.setOptions(chartStyle);
print(chart);

var chart = ui.Chart.image.series(s1_pass_orbit1.map(function(image){return image.clip(geometry2)}).select('VV'), geometry);
var chartStyle = {
  title: ee.String('Raw Orbit ').cat(ee.Number(orbits.get(1)).int().format()).getInfo(),
  series: {
    0: {lineWidth: 0, color: 'orange', pointSize: 1},
    1: {lineWidth: 0, color: 'green', pointSize: 1},
    2: {lineWidth: 0, color: 'blue', pointSize: 1}
  }
};
chart.setOptions(chartStyle);
print(chart);

// chart the observations at a single point in both orbits
// (same point, but on the preprocessed/analysis-ready collections)
var chart = ui.Chart.image.series(s1_float_pass_orbit0_bnc_sf_rtc_db_clip.select('VV'), geometry);
var chartStyle = {
  title: ee.String('Analysis Ready Orbit ').cat(ee.Number(orbits.get(0)).int().format()).getInfo(),
  series: {
    0: {lineWidth: 0, color: 'orange', pointSize: 1},
    1: {lineWidth: 0, color: 'green', pointSize: 1},
    2: {lineWidth: 0, color: 'blue', pointSize: 1}
  }
};
chart.setOptions(chartStyle);
print(chart);

var chart = ui.Chart.image.series(s1_float_pass_orbit1_bnc_sf_rtc_db_clip.select('VV'), geometry);
var chartStyle = {
  title: ee.String('Analysis Ready Orbit ').cat(ee.Number(orbits.get(1)).int().format()).getInfo(),
  series: {
    0: {lineWidth: 0, color: 'orange', pointSize: 1},
    1: {lineWidth: 0, color: 'green', pointSize: 1},
    2: {lineWidth: 0, color: 'blue', pointSize: 1}
  }
};
chart.setOptions(chartStyle);
print(chart);


// ccdc on one of the orbits (orbit 0): temporal segmentation on the VV band
var ccdc = ee.Algorithms.TemporalSegmentation.Ccdc({
  collection: s1_float_pass_orbit0_bnc_sf_rtc_db_clip, 
  breakpointBands: ['VV'], 
  tmaskBands: null, 
  minObservations: 6, 
  chiSquareProbability: 0.99, 
  minNumOfYearsScaler: 1.33, 
  dateFormat: 2, 
  lambda: 20, 
  maxIterations: 25000
});

// tBreak is an array band; the 'first' reduction keeps the earliest break per pixel
var changeTime = ccdc.select('tBreak').arrayReduce(ee.Reducer.first(), [0]).arrayFlatten([['first']]).selfMask();
var changeTimeImage = changeTime.clip(geometry2);
Export.image.toDrive({image: changeTimeImage, scale: 10, crs: 'EPSG:4326', maxPixels: 100000000, fileNamePrefix: 'orbit34'})

// ccdc on one of the orbits (orbit 1)
// NOTE(review): 'ccdc', 'changeTime' and 'changeTimeImage' are re-declared
// here, shadowing the orbit-0 results above.
var ccdc = ee.Algorithms.TemporalSegmentation.Ccdc({
  collection: s1_float_pass_orbit1_bnc_sf_rtc_db_clip, 
  breakpointBands: ['VV'], 
  tmaskBands: null, 
  minObservations: 6, 
  chiSquareProbability: 0.99, 
  minNumOfYearsScaler: 1.33, 
  dateFormat: 2, 
  lambda: 20, 
  maxIterations: 25000
});

var changeTime = ccdc.select('tBreak').arrayReduce(ee.Reducer.first(), [0]).arrayFlatten([['first']]).selfMask();
var changeTimeImage = changeTime.clip(geometry2);
Export.image.toDrive({image: changeTimeImage, crs: 'EPSG:4326', scale: 10, maxPixels: 100000000, fileNamePrefix: 'orbit107'})

/*
// MAGNITUDE
var changeTime2 = ccdc.select('tBreak').arrayReduce(ee.Reducer.max(), [0]).arrayFlatten([['last']]).selfMask()
var changeTimeImage2 = changeTime2.clip(geometry2);

var minMax = changeTimeImage2.reduceRegion({
  reducer: ee.Reducer.minMax(),
  geometry: geometry2,
  scale: 100,
  maxPixels: 1e9
}); 
Map.addLayer(changeTimeImage2, {palette:['#ffffcc','#ffeda0','#fed976','#feb24c','#fd8d3c','#fc4e2a','#e31a1c','#bd0026','#800026'], min: minMax.get('last_min').getInfo(), max: minMax.get('last_max').getInfo()}, 'Last Change');
*/

Results look something like this:
ccdc_change_example2
ccdc_change_example

Recommend Projects

  • React photo React

    A declarative, efficient, and flexible JavaScript library for building user interfaces.

  • Vue.js photo Vue.js

    🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.

  • Typescript photo Typescript

    TypeScript is a superset of JavaScript that compiles to clean JavaScript output.

  • TensorFlow photo TensorFlow

    An Open Source Machine Learning Framework for Everyone

  • Django photo Django

    The Web framework for perfectionists with deadlines.

  • D3 photo D3

    Bring data to life with SVG, Canvas and HTML. 📊📈🎉

Recommend Topics

  • javascript

    JavaScript (JS) is a lightweight interpreted programming language with first-class functions.

  • web

    Some thing interesting about web. New door for the world.

  • server

    A server is a program made to process requests and deliver data to clients.

  • Machine learning

    Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.

  • Game

    Some thing interesting about game, make everyone happy.

Recommend Org

  • Facebook photo Facebook

    We are working to build community through open source technology. NB: members must have two-factor auth.

  • Microsoft photo Microsoft

    Open source projects and samples from Microsoft.

  • Google photo Google

    Google โค๏ธ Open Source for everyone.

  • D3 photo D3

    Data-Driven Documents codes.