https://code.earthengine.google.com/
https://developers.google.com/earth-engine/python_install
https://github.com/mortcanty/CRC4Docker
!earthengine
%matplotlib inline
import ee, time
ee.Initialize()
try:
    point = ee.Geometry.Point([8.5, 50.0])
    start = ee.Date('2016-05-01')
    finish = ee.Date('2016-08-01')
    collection = ee.ImageCollection('COPERNICUS/S2') \
        .filterBounds(point) \
        .filterDate(start, finish) \
        .sort('CLOUD_COVERAGE_ASSESSMENT', True)
    count = collection.size().getInfo()
    if count == 0:
        raise ValueError('No images found')
    image = ee.Image(collection.first())
    timestamp = ee.Date(image.get('system:time_start')).getInfo()
    timestamp = time.gmtime(int(timestamp['value'])/1000)
    timestamp = time.strftime('%c', timestamp)
    systemid = image.get('system:id').getInfo()
    cloudcover = image.get('CLOUD_COVERAGE_ASSESSMENT').getInfo()
    projection = image.select('B2').projection().getInfo()['crs']
    print('system id: %s' % systemid)
    print('acquisition time: %s' % timestamp)
    print('cloud cover (percent): %s' % cloudcover)
    print('projection: %s' % projection)
except Exception as e:
    print('An error occurred: %s' % e)
maxLat = 50.06526459341422
minLon = 8.477334975832491
minLat = 50.01013697421883
maxLon = 8.633890151613743
rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
exportname = 'users/mortcanty/'+systemid.replace('/','-')
assexport = ee.batch.Export.image.toAsset(image.clip(rect).select('B2','B3','B4','B8'),
                                          description='assetExportTask',
                                          assetId=exportname, scale=10, maxPixels=1e9)
assexportid = str(assexport.id)
print('****Exporting to Assets, task id: %s' % assexportid)
assexport.start()
eeList = ee.List([1, 2, 3, 4])

def squareit(x):
    x = ee.Number(x)
    return x.multiply(x)

squaredlist = eeList.map(squareit)
print(squaredlist.getInfo())
# Image.reduceRegion example
#
# Computes a simple reduction over a region of an image. A reduction
# is any process that takes an arbitrary number of inputs (such as
# all the pixels of an image in a given region) and computes one or
# more fixed outputs. The result is a dictionary that contains the
# computed values, which in this example is the maximum pixel value
# in the region.
#
# This example shows how to print the resulting dictionary to the
# console, which is useful when developing and debugging your
# scripts, but in a larger workflow you might instead use the
# Dictionary.get() function to extract the values you need from the
# dictionary for use as inputs to other functions.
# The input image to reduce, in this case an SRTM elevation map.
image = ee.Image('srtm90_v4')
# The region to reduce within.
poly = ee.Geometry.Rectangle(-109.05, 41, -102.05, 37)
# Reduce the image within the given region, using a reducer that
# computes the max pixel value. We also specify the spatial
# resolution at which to perform the computation, in this case 200
# meters.
maxValue = image.reduceRegion(ee.Reducer.max(), poly, 200)
# Print the result to the console.
print(maxValue.getInfo())
# Pure Python
total = 0
for i in range(10):
    total += i
print(total)
# numpy
import numpy as np
print(np.array(range(10)).sum())
# ee Python API
def accumulate(current, prev):
    prev = ee.Number(prev)
    return prev.add(current)

inputList = ee.List.sequence(0, 9)
first = ee.Number(0)
result = inputList.iterate(accumulate, first)
print(result)
print(result.getInfo())
import ee
from auxil import eepca
import IPython.display as disp
ee.Initialize()
im = ee.Image(
'LANDSAT/LE07/C01/T1_RT_TOA/LE07_197025_20010626').select('B1','B2','B3','B4','B5','B7')
pcs, lambdas = eepca.pca(im)
url = pcs.select('pc1','pc2','pc3').getThumbURL({'min':-0.1,'max':0.1})
disp.Image(url=url)
gdexporttask = ee.batch.Export.image.toDrive(pcs,
description='driveExportTask',
folder='EarthEngineImages',
fileNamePrefix='PCA_20010626',
scale=30,
maxPixels=1e9)
gdexporttask.start()
im = ee.Image(
'LANDSAT/LE07/C01/T1_RT_TOA/LE07_197025_20010626') \
.select('B4')
edges = ee.Algorithms.CannyEdgeDetector(im,0.2)
assexporttask = ee.batch.Export.image.toAsset(edges,
                                              description='assetExportTask',
                                              assetId='users/mortcanty/edges',
                                              scale=30,
                                              maxPixels=1e9)
assexporttask.start()
# Load a Landsat 8 top-of-atmosphere reflectance image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
# Convert the RGB bands to the HSV color space.
hsv = image.select(['B4', 'B3', 'B2']).rgbToHsv()
# Swap in the panchromatic band and convert back to RGB.
sharpened = ee.Image.cat([
hsv.select('hue'), hsv.select('saturation'), image.select('B8')
]).hsvToRgb()
assexporttask = ee.batch.Export.image.toAsset(sharpened,
                                              description='assetExportTask',
                                              assetId='users/mortcanty/pansharpened',
                                              scale=15,
                                              maxPixels=1e9)
assexporttask.start()
A fully polarimetric SAR measures a $2\times 2$ scattering matrix $S$ at each resolution cell on the ground. The scattering matrix relates the incident and the backscattered electric fields $E^i$ and $E^b$ according to
$$ \pmatrix{E_h^b \cr E_v^b} =\pmatrix{S_{hh} & S_{hv}\cr S_{vh} & S_{vv}}\pmatrix{E_h^i \cr E_v^i}. $$
Here $E_h^{i(b)}$ and $E_v^{i(b)}$ denote the horizontal and vertical components of the incident (backscattered) oscillating electric fields directly at the target. These can be deduced from the transmitted and received radar signals via the so-called far field approximations. If both horizontally and vertically polarized radar pulses are emitted and discriminated then they determine, from the above Equation, the four complex scattering matrix elements. The per-pixel polarimetric information in the scattering matrix $S$, under the assumption of reciprocity ($S_{hv} = S_{vh}$), can then be expressed as a three-component complex vector
$$ s = \pmatrix{S_{hh}\cr \sqrt{2}S_{hv}\cr S_{vv}}, $$
where the $\sqrt{2}$ ensures that the total intensity (received signal power) is preserved. It is essentially these vectors which are provided in the SLC level-one data discussed above. The total intensity, referred to as the span, is the complex inner product of $s$ with itself,
$$ {\rm span} = s^\dagger s = |S_{hh}|^2 + 2|S_{hv}|^2 + |S_{vv}|^2, $$
where $\dagger$ denotes complex conjugate transposition.
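As a quick numerical check (a toy sketch, not part of the GEE workflow; the scattering amplitudes are invented), the span can be computed in numpy as the conjugate inner product:
import numpy as np
# invented single-pixel scattering amplitudes
S_hh = 0.2 + 0.1j
S_hv = 0.05 - 0.02j
S_vv = 0.15 + 0.05j
s = np.array([S_hh, np.sqrt(2)*S_hv, S_vv])
# np.vdot conjugates its first argument, giving s^dagger s
span = np.vdot(s, s).real
print(span)  # equals |S_hh|^2 + 2|S_hv|^2 + |S_vv|^2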
The span is a real number and the corresponding gray-scale image is called the span image. The observation vector $s$ can be shown to be a realization of a complex multivariate normal random variable. An equivalent and often preferred representation is in terms of the coherency vector or Pauli decomposition
$$ k = {1\over\sqrt{2}}\pmatrix{S_{hh} + S_{vv}\cr S_{hh} - S_{vv} \cr 2S_{hv}}. $$
The polarimetric signal can also be represented by the complex outer product of $s$ with itself:
$$ C = s s^\dagger = \pmatrix{ |S_{hh}|^2 & \sqrt{2}S_{hh}S_{hv}^* & S_{hh}S_{vv}^* \cr \sqrt{2}S_{hv}S_{hh}^* & 2|S_{hv}|^2 & \sqrt{2}S_{hv}S_{vv}^* \cr S_{vv}S_{hh}^* & \sqrt{2}S_{vv}S_{hv}^* & |S_{vv}|^2 }. $$
The diagonal elements of $C$ are real numbers, with span $= {\rm tr}(C)$, and the off-diagonal elements are complex. This matrix representation contains all of the information in the polarized signal.
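Continuing the toy numpy sketch above, one can verify numerically that the trace of $C$ reproduces the span and that the Pauli vector carries the same total power:
# complex outer product C = s s^dagger for the invented pixel above
C = np.outer(s, s.conj())
print(np.allclose(np.trace(C).real, span))   # True: span = tr(C)
# Pauli (coherency) vector of the same pixel
k = np.array([S_hh + S_vv, S_hh - S_vv, 2*S_hv])/np.sqrt(2)
print(np.isclose(np.vdot(k, k).real, span))  # True: total power preserved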
The matrix $C$ can be averaged over the number of looks (number of adjacent cells used to average out the effect of speckle) to give an estimate of the covariance matrix of each multi-look pixel:
$$ \bar{C} ={1\over m}\sum_{\nu=1}^m s(\nu) s(\nu)^\dagger = \langle s s^\dagger \rangle = \pmatrix{ \langle |S_{hh}|^2\rangle & \langle\sqrt{2}S_{hh}S_{hv}^*\rangle & \langle S_{hh}S_{vv}^*\rangle \cr \langle\sqrt{2} S_{hv}S_{hh}^*\rangle & \langle 2|S_{hv}|^2\rangle & \langle\sqrt{2}S_{hv}S_{vv}^*\rangle \cr \langle S_{vv}S_{hh}^*\rangle & \langle\sqrt{2}S_{vv}S_{hv}^*\rangle & \langle |S_{vv}|^2\rangle }, $$
where $m$ is the number of looks. This matrix (or alternatively the equivalent coherency matrix $\langle k k^\dagger \rangle$) can be generated with the Sentinel-1 Toolbox. Rewriting the first of the above equalities,
$$ m\bar{C} = \sum_{\nu=1}^m s(\nu) s(\nu)^\dagger =: x. $$
This quantity $x$ is a realization of a complex random matrix which is known to have a complex Wishart distribution with $N\times N$ covariance matrix $\Sigma$ (here $N=3$) and $m$ degrees of freedom:
$$ p_{W_c}( x) ={|x|^{(m-N)}\exp(-{\rm tr}(\Sigma^{-1} x)) \over \pi^{N(N-1)/2}|\Sigma|^{m}\prod_{i=1}^N\Gamma(m+1-i)},\quad m \ge N, $$
provided that the vectors $s(\nu)$ are independent and drawn from a complex multivariate normal distribution.
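For intuition, the logarithm of this density transcribes directly into numpy/scipy. This is only a client-side sketch (it assumes $x$ and $\Sigma$ are Hermitian positive definite numpy arrays), not the way the server-side code evaluates it:
import numpy as np
from scipy.special import gammaln

def log_wishart_cpx(x, Sigma, m):
    # log of the complex Wishart density p_Wc(x) given above
    N = x.shape[0]
    ldx = np.linalg.slogdet(x)[1]        # log|x|
    ldS = np.linalg.slogdet(Sigma)[1]    # log|Sigma|
    tr = np.trace(np.linalg.solve(Sigma, x)).real
    lognorm = N*(N-1)/2.0*np.log(np.pi) + m*ldS \
              + sum(gammaln(m+1-i) for i in range(1, N+1))
    return (m - N)*ldx - tr - lognorm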
The scattering vector given above corresponds to so-called full, or quad, polarimetric SAR. Satellite-based SAR sensors often operate in reduced, power-saving polarization modes, emitting only one polarization and receiving two (dual polarization) or one (single polarization). The look-averaged covariance matrices are correspondingly reduced in dimension. For instance, for dual polarization with horizontal transmission and horizontal and vertical reception,
$$ \bar{C} = \pmatrix{ \langle |S_{hh}|^2\rangle & \langle S_{hh}S_{hv}^*\rangle \cr \langle S_{hv}S_{hh}^*\rangle & \langle |S_{hv}|^2\rangle }, $$
and, for single polarization and horizontal transmission/reception, we get simply the intensity image
$$ \bar{I} = \langle |S_{hh}|^2\rangle \quad {\rm or} \quad \bar{I} = \langle |S_{vv}|^2\rangle. $$
A special case, relevant to the Sentinel-1 data on the GEE, is vertical transmission with vertical and horizontal reception, but without the off-diagonal complex element:
$$ \bar{C} = \pmatrix{ \langle |S_{vv}|^2\rangle & 0 \cr 0 & \langle |S_{vh}|^2\rangle } $$
referred to as dualpol, diagonal only.
The following discussion is based on Conradsen et al. (2003).
As we have seen, we can represent a pixel vector in an $m$ look-averaged polSAR image in covariance matrix format by $\bar C$, where
$$ m\bar C = x = \sum_{\nu=1}^m s(\nu) s(\nu)^\dagger $$
is a realization of a random matrix $X$ with a complex Wishart distribution.
In order to derive a change detection procedure for two polarimetric SAR images, we formulate a statistical test. For each pixel in $k$ images, define the null (or no-change) simple hypothesis as
$$ H_0:\quad \Sigma_1 = \Sigma_2 = \dots = \Sigma_k = \Sigma, $$
and the alternative composite hypothesis as
$$ H_1:\quad \Sigma_i \ne \Sigma_j $$ for at least one pair $i,j$.
Then the likelihood ratio test has the critical region for rejection of the no-change hypothesis
$$ Q = k^{kNm}{ |x_1|^m |x_2|^m\cdots |x_k|^m \over |x_1 + x_2 + \dots + x_k|^{km} } \le t $$
where $t$ is some small number and $N$ is the dimension of $x$.
Taking logarithms:
$$ \ln Q = m(Nk\ln k + \sum_{i=1}^k \ln|x_i| - k \ln|x|) \le t' $$
where $x = \sum_{i=1}^k x_i$.
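As a client-side sketch (for intuition only; the eeWishart module below does this on the server), the statistic transcribes directly to numpy for a list of per-pixel matrices $x_i$:
import numpy as np

def lnQ(xs, m, N):
    # omnibus test statistic ln Q for a list of k matrices x_i
    k = len(xs)
    logdets = [np.linalg.slogdet(x)[1] for x in xs]
    return m*(N*k*np.log(k) + sum(logdets)
              - k*np.linalg.slogdet(sum(xs))[1])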
Finally, we have the following approximation for the statistical distribution of the test statistic $Q$:
$$ {\rm prob}(-2\ln Q\le z) \simeq P_{\chi^2;(k-1)f}(z), $$
where $f$ is the number of parameters required to specify $x$: 9 for quadpol, 4 for dualpol and 2 for dualpol, diagonal only.
In practice we choose a significance level $\alpha$, e.g., $\alpha = 0.01$, and a decision threshold $z$ such that
$$ {\rm prob}(-2\ln Q\le z) = 1-\alpha, $$
and interpret all pixels with larger values of $-2\ln Q$ as change.
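For example, the threshold for a series of five dualpol, diagonal-only images ($f=2$) at $\alpha=0.01$ can be obtained with scipy (illustrative only, not part of the EE API):
from scipy.stats import chi2

k, f, alpha = 5, 2, 0.01
z = chi2.ppf(1 - alpha, (k - 1)*f)
# flag a pixel as change when -2 ln Q exceeds z
print(z)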
Furthermore, this test can be factored into a sequence of tests involving hypotheses of the form $\Sigma_1 = \Sigma_2$ against $\Sigma_1 \ne \Sigma_2$, $\Sigma_1 = \Sigma_2 = \Sigma_3$ against $\Sigma_1 = \Sigma_2 \ne \Sigma_3$, and so forth. For example, to test
$$ H_{0j}: \quad \Sigma_1 = \Sigma_2 = \dots = \Sigma_{j-1} = \Sigma_j $$
against
$$ H_{1j}: \quad \Sigma_1 = \Sigma_2 = \dots = \Sigma_{j-1} \ne \Sigma_j, $$
the likelihood ratio test statistic is $R^1_j$ given by
$$ \ln R^1_j = m\big[N(j\ln j - (j-1)\ln (j-1)) + (j-1)\ln \big|\sum_{i=1}^{j-1} x_i\big| + \ln|x_j| - j\ln \big|\sum_{i=1}^{j} x_i\big|\ \big], \quad j=2\dots k, $$
and
$$ {\rm prob}(-2\ln R^1_j\le z) \simeq P_{\chi^2;f}(z). $$
Now suppose that we conclude $\Sigma_1\ne \Sigma_2$. Then we can continue to look for additional changes by restarting the tests at $j=2$,
$$ \ln R^2_j = m\big[N(j\ln j - (j-1)\ln (j-1)) + (j-1)\ln \big|\sum_{i=2}^{j-1} x_i\big| + \ln|x_j| - j\ln \big|\sum_{i=2}^{j} x_i\big|\ \big] \quad j=3\dots k. $$
For a series of, say, 5 images we therefore have, for each pixel, the following tests to consider:
$$ \matrix{ R^1_2 & R^1_3 & R^1_4 & R^1_5 \cr & R^2_3 & R^2_4 & R^2_5 \cr & & R^3_4 & R^3_5 \cr & & & R^4_5 } $$
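A minimal client-side transcription of the first-row statistic $\ln R^1_j$ (a sketch under the same assumptions as the lnQ function above; the eeWishart module used below runs the full sequence of tests on the server):
import numpy as np

def lnR1j(xs, j, m, N):
    # first-row sequential statistic ln R^1_j, 2 <= j <= len(xs)
    ld = lambda x: np.linalg.slogdet(x)[1]
    s1 = sum(xs[:j-1])   # x_1 + ... + x_{j-1}
    s2 = sum(xs[:j])     # x_1 + ... + x_j
    return m*(N*(j*np.log(j) - (j-1)*np.log(j-1))
              + (j-1)*ld(s1) + ld(xs[j-1]) - j*ld(s2))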
rect = ee.Geometry.Rectangle( [5.9985351562500009, 50.938486572440667, 6.0946655273437509, 50.973953836311068])
collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
.filterBounds(rect) \
.filterDate(ee.Date('2016-04-01'), ee.Date('2016-09-01')) \
.filter(ee.Filter.eq('transmitterReceiverPolarisation', ['VV','VH'])) \
.filter(ee.Filter.eq('resolution_meters', 10)) \
.filter(ee.Filter.eq('instrumentMode', 'IW')) \
.filter(ee.Filter.eq('orbitProperties_pass', 'ASCENDING')) \
.sort('system:time_start')
acquisition_times = ee.List(collection.aggregate_array('system:time_start')).getInfo()
count = len(acquisition_times)
print(count)
from auxil import eeWishart
import math

def get_vvvh(image):
    ''' get the VV and VH bands from the Sentinel-1 image collection
        and restore the linear signal from the dB values '''
    return image.select('VV','VH').multiply(ee.Image.constant(math.log(10.0)/10.0)).exp()

def clipList(current, prev):
    ''' clip a list of images '''
    imlist = ee.List(ee.Dictionary(prev).get('imlist'))
    rect = ee.Dictionary(prev).get('rect')
    imlist = imlist.add(ee.Image(current).clip(rect))
    return ee.Dictionary({'imlist': imlist, 'rect': rect})
pcollection = collection.map(get_vvvh)
# get the list of images and clip to roi
pList = pcollection.toList(count)
first = ee.Dictionary({'imlist':ee.List([]),'rect':rect})
imList = ee.Dictionary(pList.iterate(clipList,first)).get('imlist')
# run the algorithm ------------------------------------------
result = ee.Dictionary(eeWishart.omnibus(imList,0.0001,True))
# ------------------------------------------------------------
cmap = ee.Image(result.get('cmap')).byte()
smap = ee.Image(result.get('smap')).byte()
fmap = ee.Image(result.get('fmap')).byte()
bmap = ee.Image(result.get('bmap')).byte()
jet = 'black,blue,cyan,yellow,red'
url = fmap.getThumbURL({'min':0,'max':5,'palette':jet})
disp.Image(url=url, width=1000)
# background image for video generation
collection1 = ee.ImageCollection('COPERNICUS/S2') \
.filterBounds(rect) \
.filterDate(ee.Date('2016-04-01'), ee.Date('2016-09-01')) \
.sort('CLOUDY_PIXEL_PERCENTAGE',True)
background = ee.Image(collection1.first()) \
.clip(rect) \
.select('B8') \
.divide(10000)
# export
bnames = bmap.bandNames().getInfo()
cmaps = ee.Image.cat(cmap,smap,fmap,bmap,background) \
.rename(['cmap','smap','fmap']+bnames+['background'])
assexport = ee.batch.Export.image.toAsset(cmaps,
description='assetExportTask',
assetId='users/mortcanty/omnibus_change_maps',scale=10,maxPixels=1e9)
assexportid = str(assexport.id)
print('****Exporting to Assets, task id: %s' % assexportid)
assexport.start()
%run scripts/dispms -f imagery/may0107pca.tif -p [1,2,3] -e 3
# first 4 principal components of ASTER image
image = ee.Image('users/mortcanty/supervisedclassification/may0107pca') \
.select(0,1,2,3)
# training data
table = ee.FeatureCollection('users/mortcanty/supervisedclassification/train')
trainData = image.sampleRegions(table,['CLASS_ID'])
print(trainData.size().getInfo())
def jmsep(class1, class2, image, table):
    # Jeffries-Matusita separability of classes class1 and class2
    table1 = table.filter(
        ee.Filter.eq('CLASS_ID', str(class1-1)))
    m1 = image.reduceRegion(ee.Reducer.mean(), table1) \
              .toArray()
    s1 = image.toArray() \
              .reduceRegion(ee.Reducer.covariance(), table1) \
              .toArray()
    table2 = table.filter(
        ee.Filter.eq('CLASS_ID', str(class2-1)))
    m2 = image.reduceRegion(ee.Reducer.mean(), table2) \
              .toArray()
    s2 = image.toArray() \
              .reduceRegion(ee.Reducer.covariance(), table2, 15) \
              .toArray()
    m12 = m1.subtract(m2)
    m12 = ee.Array([m12.toList()])  # makes a 2D row matrix
    s12i = s1.add(s2).divide(2).matrixInverse()
    # first term in the Bhattacharyya distance
    B1 = m12.matrixMultiply(
             s12i.matrixMultiply(m12.matrixTranspose())) \
            .divide(8)
    ds1 = s1.matrixDeterminant()
    ds2 = s2.matrixDeterminant()
    ds12 = s1.add(s2).matrixDeterminant()
    # second term
    B2 = ds12.divide(2).divide(ds1.multiply(ds2).sqrt()) \
             .log().divide(2)
    B = ee.Number(B1.add(B2).project([0]).toList().get(0))
    # J-M separability = 2(1 - exp(-B))
    return ee.Number(1).subtract(ee.Number(1) \
                       .divide(B.exp())) \
                       .multiply(2)

print(jmsep(5, 9, image, table).getInfo())
import IPython.display as disp
jet = 'black,blue,cyan,yellow,red,brown'
# rename the class ids from strings to integers
trainData = image.sampleRegions(table,['CLASS_ID']) \
    .remap(['0','1','2','3','4','5','6','7','8','9'],
           [0,1,2,3,4,5,6,7,8,9],'CLASS_ID')
# train a naive Bayes classifier
classifier = ee.Classifier.continuousNaiveBayes()
trained = classifier.train(trainData,'CLASS_ID',image.bandNames())
# classify the image and display
classified = image.classify(trained)
url = classified.select('classification') \
    .getThumbURL({'min':0,'max':9,'palette':jet})
disp.Image(url=url)
http://ms-image-analysis.appspot.com/static/index.html
https://github.com/mortcanty/earthengine
http://fwenvi-idl.blogspot.de/2017/08/automatic-radiometric-normalization.html
http://fwenvi-idl.blogspot.de/2017/08/automatic-radiometric-normalization_14.html