diff --git a/.directory b/.directory new file mode 100644 index 0000000..6156020 --- /dev/null +++ b/.directory @@ -0,0 +1,7 @@ +[Dolphin] +AdditionalInfo=3 +Timestamp=2020,3,9,7,49,7 +ViewMode=2 + +[Settings] +ShowDotFiles=true diff --git a/IEfeeds_code/.directory b/IEfeeds_code/.directory new file mode 100644 index 0000000..457dce4 --- /dev/null +++ b/IEfeeds_code/.directory @@ -0,0 +1,4 @@ +[Dolphin] +AdditionalInfo=3 +Timestamp=2020,1,9,18,18,6 +ViewMode=1 diff --git a/IEfeeds_code/IEsettings/IE_longnames.txt b/IEfeeds_code/IEsettings/IE_longnames.txt new file mode 100644 index 0000000..4af0635 --- /dev/null +++ b/IEfeeds_code/IEsettings/IE_longnames.txt @@ -0,0 +1 @@ +IE feed from R for various regions with 20 industries and 22 products \ No newline at end of file diff --git a/IEfeeds_code/IEsettings/IE_shortnames.txt b/IEfeeds_code/IEsettings/IE_shortnames.txt new file mode 100644 index 0000000..dfcc649 --- /dev/null +++ b/IEfeeds_code/IEsettings/IE_shortnames.txt @@ -0,0 +1 @@ +Ind20Pro22v1 diff --git a/IEfeeds_code/Ind20Pro22v1_InitialEstimate.m b/IEfeeds_code/Ind20Pro22v1_InitialEstimate.m new file mode 100644 index 0000000..4866435 --- /dev/null +++ b/IEfeeds_code/Ind20Pro22v1_InitialEstimate.m @@ -0,0 +1,49 @@ +function [RegMap,IndMap,ProdMap]=Ind20Pro22v1_InitialEstimate(handles) + + disp('Launching initial estimate for the extended PIOT version 1'); + + % Write handles variables into mat-file for R to read + % Working directory + filename = [handles.processeddatadir,'WorkingDirectory4R.mat']; + out = handles.motherALANGdir; + save(filename,'out'); + + % Write region aggregator to file + filename = [handles.processeddatadir,'RegionAggFile4R.mat']; + out = handles.regionaggfile; + save(filename,'out'); + + % Region aggregator + RegMap = csvread(handles.regionagg); + + if size(RegMap,1) > size(RegMap,2) % check orientation of aggregator + RegMap = RegMap'; + end + + reg_proxy = ones(size(RegMap,2),1); + RegMap = prorate(RegMap,'col_proxy',reg_proxy); + + % Product aggregator + ProdMap = csvread(handles.sectoraggprod); + + if size(ProdMap,1) > size(ProdMap,2) % check orientation of aggregator + ProdMap = ProdMap'; + end + + prod_proxy = ones(size(ProdMap,2),1); + ProdMap = prorate(ProdMap,'col_proxy',prod_proxy); + + % Industry aggregator + IndMap = csvread(handles.sectoragg); + + if size(IndMap,1) > size(IndMap,2) % check orientation of aggregator + IndMap = IndMap'; + end + + ind_proxy = ones(size(IndMap,2),1); + IndMap = prorate(IndMap,'col_proxy',ind_proxy); + + command = 'Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/IEfeeds_code/Ind20Pro22v1_InitialEstimate.R'; + system(command,'-echo'); + + end \ No newline at end of file diff --git a/IEfeeds_code/README.txt b/IEfeeds_code/README.txt index 21e6d8d..2460986 100644 --- a/IEfeeds_code/README.txt +++ b/IEfeeds_code/README.txt @@ -1 +1 @@ -See \PIOLab\Rscripts\IEfeeds_code for IEfeeds_code. \ No newline at end of file +See \PIOLab\Rscripts\IEfeeds_code for IE feeds in R scripts. 
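A note on the MATLAB-to-R handover in Ind20Pro22v1_InitialEstimate.m above: the function saves the working directory and the region aggregator file name into .mat files (WorkingDirectory4R.mat, RegionAggFile4R.mat) before invoking the R script via system(). The R counterpart, Rscripts/IEfeeds_code/Ind20Pro22v1_InitialEstimate.R, is not part of this diff; a minimal sketch of the read-back step, assuming the R.matlab package and the file names used above, might look like:

# Sketch only -- the actual Ind20Pro22v1_InitialEstimate.R is not shown in this changeset.
library(R.matlab)  # readMat() reads MATLAB .mat files (v6/v7)

processeddatadir <- "ProcessedData/"  # assumed to mirror handles.processeddatadir on the R side

# Each file holds a single variable named 'out' (see the save() calls in the MATLAB code)
motherALANGdir <- as.character(readMat(paste0(processeddatadir,"WorkingDirectory4R.mat"))$out)
regionaggfile  <- as.character(readMat(paste0(processeddatadir,"RegionAggFile4R.mat"))$out)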
\ No newline at end of file diff --git a/IEfeeds_code/datafeed_PIOLab_InitialEstimate.m b/IEfeeds_code/datafeed_PIOLab_InitialEstimate.m new file mode 100644 index 0000000..f93a7e6 --- /dev/null +++ b/IEfeeds_code/datafeed_PIOLab_InitialEstimate.m @@ -0,0 +1,11 @@ +function [RegMap,IndMap,ProdMap] = datafeed_PIOLab_InitialEstimate(handles) + +% Master file for IE feeds + + if handles.nonsurvey == 1 + + [RegMap,IndMap,ProdMap]=Ind20Pro22v1_InitialEstimate(handles); + + end + +end \ No newline at end of file diff --git a/Rscripts/datafeeds_code/datafeed_PIOLab_BACI.R b/Rscripts/datafeeds_code/datafeed_PIOLab_BACI.R index 7e9bc4e..1ac48e5 100644 --- a/Rscripts/datafeeds_code/datafeed_PIOLab_BACI.R +++ b/Rscripts/datafeeds_code/datafeed_PIOLab_BACI.R @@ -16,6 +16,9 @@ if(Sys.info()[1] == "Linux"){ source(paste0(root_folder,"Rscripts/Subroutines/InitializationR.R")) path["df_Processed"] <- paste0(path$Processed,"/",datafeed_name) +# Load function to write arrays to files +source(paste0(path$Subroutines,"/Numbers2File.R")) + # Loading raw data source(paste0(path$Subroutines,"/Load_BACI.R")) @@ -26,7 +29,7 @@ data <- SE_LogRegression(data,RSE$Minimum,RSE$Maximum) data <- select(data,-value) # Set variables -reg_max <- nrow(root$region) +n_reg <- nrow(root$region) n_yea <- as.character(year-2007) n_she <- "1" n_pro <- nrow(root$product) @@ -36,45 +39,40 @@ n_ind <- nrow(root$industry) source(paste0(path$Subroutines,"/makeALANGheadline.R")) # Check if folder with processed data exists, in case delete and create empty one -path_set <- paste0(path$root,"ProcessedData/",datafeed_name) -if(dir.exists(path_set)) unlink(path_set,recursive = TRUE) -dir.create(path_set) - -mat <- matrix(0,nrow = n_pro,ncol = 1) # Empty matrix to put numbers in +if(dir.exists(path$df_Processed)) unlink(path$df_Processed,recursive = TRUE) +dir.create(path$df_Processed) a <- 1 # Set starting value for alang line index + for(i in unique(data$From)) # Loop over the exporting regions { - data_sel <- filter(data,From == i) # Filter exporting region + sel <- list("data" = filter(data,From == i)) # Filter exporting region - for(j in unique(data_sel$To)) # Loop over trade partners + for(j in unique(sel$data$To)) # Loop over trade partners { # Add empty line with tag ALANG <- add_row(ALANG,'1' = paste0("DataFeed BACI from ",root$region$Name[i]," to ",root$region$Name[j])) - trader_sel <- filter(data_sel,To == j) %>% select(-From,-To) # select data for trade partners - - values <- mat # Create empty column vector - values[trader_sel$Product,1] <- trader_sel$Quantity # Write values + # select data for trade partners: + sel[["trader"]] <- filter(sel$data,To == j) %>% select(Product,Quantity,SE) - filename_value <- paste0("BACI_",year,"_Values_",root$region$Name[i],"-", - root$region$Name[j],".csv") # Set name of the file - - write.table(values,row.names = FALSE,col.names = FALSE, sep = ",", - file = paste0(path_set,"/",filename_value)) # Write array to folder - - SE <- mat # Create empty column vector and write SE + df <- data.frame("RHS" = rep(0,n_pro),"SE" = rep(0,n_pro)) # Create empty data frame - SE[trader_sel$Product,1] <- trader_sel$SE # Write SE numbers into array + df$RHS[sel$trader$Product] <- sel$trader$Quantity # Write values + df$SE[sel$trader$Product] <- sel$trader$SE # Write standard errors - filename_SE <- paste0("BACI_",year,"_SE_",root$region$Name[i],"-", - root$region$Name[j],".csv") # Set name of the file + # Set name of the files: + filename <- list("RHS" = paste0("/BACI/BACI_",year,"_RHS_",root$region$Name[i], + 
"-",root$region$Name[j],".csv"), + "SE" = paste0("/BACI/BACI_",year,"_SE_",root$region$Name[i],"-", + root$region$Name[j],".csv")) - write.table(SE,row.names = FALSE,col.names = FALSE, sep = ",", - file = paste0(path_set,"/",filename_SE)) # Write array to folder + # Write data to file: + Numbers2File(df$RHS,paste0(path$Processed,filename$RHS)) + Numbers2File(df$SE,paste0(path$Processed,filename$SE)) - ALANG$Value[a] <- paste0("DATAPATH/",filename_value) - ALANG$S.E.[a] <- paste0("DATAPATH/",filename_SE) + ALANG$Value[a] <- paste0("DATAPATH",filename$RHS) + ALANG$S.E.[a] <- paste0("DATAPATH",filename$SE) ALANG$`Row parent`[a] <- as.character(i) ALANG$`Column parent`[a] <- as.character(j) @@ -97,17 +95,6 @@ ALANG$Margin <- n_she ALANG$`Row child` <- "2" ALANG$`Row grandchild` <- "1:e" ALANG$`Column child` <- "1" - -# # Create and write sector concordance to file -# Concord <- matrix(1,nrow = n_pro,ncol = n_ind) # block matrix required for aggregation -# -# # Set name and path to concordance and write to folder -# Concorda_name <- "BACI_Sec_Concordance" -# Concord_path <- paste0(path$Concordance,"/",Concorda_name,".csv") -# write.table(Concord,file = Concord_path,row.names = FALSE,col.names = FALSE,sep = ",") - -# Add path to concordance to ALANG commands -#ALANG$`Column grandchild` <- paste0("1-e t2 CONCPATH/",Concorda_name,".csv") ALANG$`Column grandchild` <- "1-e" # Call script that writes the ALANG file to the repsective folder in the root diff --git a/Rscripts/datafeeds_code/datafeed_PIOLab_IRPexports.R b/Rscripts/datafeeds_code/datafeed_PIOLab_IRPexports.R index 81ee5de..9734abc 100644 --- a/Rscripts/datafeeds_code/datafeed_PIOLab_IRPexports.R +++ b/Rscripts/datafeeds_code/datafeed_PIOLab_IRPexports.R @@ -11,9 +11,15 @@ if(Sys.info()[1] == "Linux"){ root_folder <- "/import/emily1/isa/IELab/Roots/PIOLab/"}else{ root_folder <- "C:/Users/hwieland/Github workspace/PIOLab/"} ################################################################################ + # Initializing R script (load R packages and set paths to folders etc.) source(paste0(root_folder,"Rscripts/Subroutines/InitializationR.R")) +path["df_Processed"] <- paste0(path$Processed,"/",datafeed_name) + +# Load function to write arrays to files +source(paste0(path$Subroutines,"/Numbers2File.R")) + # Loading raw data data <- read.csv(paste0(path$Raw,"/IRP/all_CCC_Exp_ResearchDB.csv"),stringsAsFactors=FALSE) colnames(data)[6:49] <- 1970:2013 @@ -36,7 +42,7 @@ data <- data %>% filter(!Code %in% c(233,242)) data <- data[!is.na(data$Code),] # Mayotte and Farour Isl. 
are not in root (tbc) -reg_max <- nrow(root$region) +n_reg <- nrow(root$region) # Add standard errors source(paste0(path$Subroutines,"/SE_LogRegression.R")) @@ -52,21 +58,21 @@ for(i in 1:nrow(data)) # Get root_code of regions reg <- data$Code[i] - if(reg == 1) reg_range <- paste0("2-",as.character(reg_max)) + if(reg == 1) reg_range <- paste0("2-",as.character(n_reg)) - if(reg == 2) reg_range <- paste0("[1,3-",as.character(reg_max),"]") + if(reg == 2) reg_range <- paste0("[1,3-",as.character(n_reg),"]") - if(reg == reg_max) reg_range <- paste0("1-",as.character(reg_max-1)) + if(reg == n_reg) reg_range <- paste0("1-",as.character(n_reg-1)) - if(reg == (reg_max-1)) reg_range <- paste0("[1-",as.character(reg_max-2),",",as.character(reg_max),"]") + if(reg == (n_reg-1)) reg_range <- paste0("[1-",as.character(n_reg-2),",",as.character(n_reg),"]") - if(reg > 2 & reg < (reg_max-1)) { + if(reg > 2 & reg < (n_reg-1)) { reg_range <- paste0("[1-",as.character(reg-1),",",as.character(reg+1),"-", - as.character(reg_max),"]") } + as.character(n_reg),"]") } # Read import value value <- as.character(data$Quantity[i]) - # Set SE to 5% + # Set SE SE <- as.character(data$SE[i]) reg_name <- root$region$Name[reg] reg <- as.character(reg) diff --git a/Rscripts/datafeeds_code/datafeed_PIOLab_IRPextraction.R b/Rscripts/datafeeds_code/datafeed_PIOLab_IRPextraction.R index 2bc02c4..17ee55a 100644 --- a/Rscripts/datafeeds_code/datafeed_PIOLab_IRPextraction.R +++ b/Rscripts/datafeeds_code/datafeed_PIOLab_IRPextraction.R @@ -3,12 +3,6 @@ datafeed_name <- "IRPextraction" print(paste0("datafeed_PIOLab_",datafeed_name," initiated.")) -# Create wrapper for writing clean data -WriteFile <- function(d,t) -{ - write.table(d,file = t,row.names = FALSE,col.names = FALSE,sep = ",",na = "NaN") -} - ################################################################################ # Set library path when running on suphys server if(Sys.info()[1] == "Linux"){ @@ -19,72 +13,58 @@ if(Sys.info()[1] == "Linux"){ ################################################################################ # Initializing R script (load R packages and set paths to folders etc.) 
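The WriteFile wrapper deleted above is superseded throughout this changeset by a shared subroutine, Rscripts/Subroutines/Numbers2File.R, which the reworked feeds source but which is not itself included in the diff. Judging from its call sites (Numbers2File(x, file)) and from the wrapper it replaces, a minimal sketch of that helper, under those assumptions, would be:

# Sketch only -- the real Numbers2File.R is not part of this diff; the behaviour below is
# inferred from the removed WriteFile wrapper and from how the feeds call the function.
Numbers2File <- function(x, file)
{
  # Plain numeric CSV: no header, no row names, NaN written out literally
  write.table(x, file = file, row.names = FALSE, col.names = FALSE, sep = ",", na = "NaN")
}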
source(paste0(root_folder,"Rscripts/Subroutines/InitializationR.R")) - + +path["df_Processed"] <- paste0(path$Processed,"/",datafeed_name) + +# Load function to write arrays to files +source(paste0(path$Subroutines,"/Numbers2File.R")) + # Loading raw data source(paste0(path$Subroutines,"/Read_ExtractionIRP.R")) # Loading function for estimating SE with linear regression source(paste0(path$Subroutines,"/SE_LogRegression.R")) +# read upper and lower error bounds from settings file RSE <- filter(read.xlsx(path$RSE_settings),Item == datafeed_name) +# Estimate standard error data <- SE_LogRegression(data,RSE$Minimum,RSE$Maximum) n_reg <- nrow(root$region) # Number of root regions -data_new <- matrix(NaN,nrow = n_reg,ncol = 2) +df <- data.frame("RHS" = rep(NaN,n_reg), "SE" = rep(NaN,n_reg)) +df$RHS[data$Code] <- data$Quantity +df$SE[data$Code] <- data$SE -data_new[data$Code,1] <- data$Quantity -data_new[data$Code,2] <- data$SE -data <- as.data.frame(data_new) -colnames(data) <- c("RHS","SE") -remove(data_new) +# Check if folder with processed data exists, in case delete and create empty one +if(dir.exists(path$df_Processed)) unlink(path$df_Processed,recursive = TRUE) +dir.create(path$df_Processed) # Create empty ALANG table with header source(paste0(path$Subroutines,"/makeALANGheadline.R")) -# Check if folder with processed data exists, in case delete and create empty one -path_set <- paste0(path$root,"ProcessedData/",datafeed_name) -if(dir.exists(path_set)) unlink(path_set,recursive = TRUE) -dir.create(path_set) - -# Set path to Sector Concordance -filename <- list("IndAgg" ="Sector Aggregators/20IndV1_SectorAggregatorIndustries.csv") - -IndAgg <- read.csv(paste0(path$Concordance,"/",filename$IndAgg),header = FALSE) -IndAgg <- as.matrix(IndAgg) - -# Create Root2Root region concordance (identity ) -RegAgg <- diag(length(root$region$Code)) # Create region aggregator - -# Set name of concordance -filename["RegAgg"] <- "Root2Root_Reg_Concordance.csv" - -# Write to folder -WriteFile(RegAgg,paste0(path$Concordance,"/",filename$RegAgg)) - -# Define section of path to processed data file -filename["RHS"] <- paste0(path$Processed,"/",datafeed_name,"/",year,"_") - -for(i in root$region$Code) +for(i in data$Code) { - data_sel <- data.frame("RSE" = rep(NaN,n_reg),"SE" = rep(NaN,n_reg)) + sel <- df + sel[-i,] <- NaN - data_sel$RSE[i] <- data$RHS[i] - data_sel$SE[i] <- data$SE[i] + filename <- list("RHS" = paste0("/",datafeed_name,"/",datafeed_name,"_RHS_",year, + "_",root$region$RootCountryAbbreviation[i],".csv"), + "SE" = paste0("/",datafeed_name,"/",datafeed_name,"_SE_",year, + "_",root$region$RootCountryAbbreviation[i],".csv")) # Write RHS and SE data to folder - WriteFile(data_sel$RSE,paste0(filename$RHS,"RHS_",root$region$RootCountryAbbreviation[i],"_.csv")) - WriteFile(data_sel$SE[i],paste0(filename$RHS,"SE_",root$region$RootCountryAbbreviation[i],"_.csv")) + Numbers2File(sel$RHS,paste0(path$Processed,filename$RHS)) + Numbers2File(sel$SE,paste0(path$Processed,filename$SE)) - # Add command for domestic Use table - ALANG <- add_row(ALANG,'#' = i) + ALANG <- add_row( ALANG, + '1' = paste0("DataFeed IRP Extraction ",year," ",root$region$Name[i]), + 'Value' = paste0("DATAPATH",filename$RHS), + 'S.E.' = paste0("DATAPATH",filename$SE) ) } -ALANG$`1` <- paste0("DataFeed IRP Extraction ",year," ",root$region$Name) -ALANG$Value <- paste0(filename$RHS,"RHS_",root$region$RootCountryAbbreviation,"_.csv") -ALANG$S.E. 
<- paste0(filename$RHS,"SE_",root$region$RootCountryAbbreviation,"_.csv") - +ALANG$`#` <- 1:nrow(ALANG) ALANG$Coef1 <- "1" ALANG$Years <- "1" ALANG$Margin <- "1" @@ -108,4 +88,3 @@ ALANG$`Post-Map` <- "" source(paste0(path$root,"Rscripts/datafeeds_code/datafeed_subroutines/WriteALANG2Folder.R")) print(paste0("datafeed_PIOLab_",datafeed_name," finished.")) - diff --git a/Rscripts/datafeeds_code/datafeed_PIOLab_IRPimports.R b/Rscripts/datafeeds_code/datafeed_PIOLab_IRPimports.R index 1af3e51..7bd1da9 100644 --- a/Rscripts/datafeeds_code/datafeed_PIOLab_IRPimports.R +++ b/Rscripts/datafeeds_code/datafeed_PIOLab_IRPimports.R @@ -12,6 +12,7 @@ if(Sys.info()[1] == "Linux"){ root_folder <- "/import/emily1/isa/IELab/Roots/PIOLab/"}else{ root_folder <- "C:/Users/hwieland/Github workspace/PIOLab/"} ################################################################################ + # Initializing R script (load R packages and set paths to folders etc.) source(paste0(root_folder,"Rscripts/Subroutines/InitializationR.R")) @@ -43,7 +44,7 @@ source(paste0(path$Subroutines,"/SE_LogRegression.R")) RSE <- filter(read.xlsx(path$RSE_settings),Item == datafeed_name) data <- SE_LogRegression(data,RSE$Minimum,RSE$Maximum) -reg_max <- nrow(root$region) +n_reg <- nrow(root$region) # Create empty ALANG table with header source(paste0(path$Subroutines,"/makeALANGheadline.R")) @@ -54,17 +55,17 @@ for(i in 1:nrow(data)) # Get root_code of regions reg <- data$Code[i] - if(reg == 1) reg_range <- paste0("2-",as.character(reg_max)) + if(reg == 1) reg_range <- paste0("2-",as.character(n_reg)) - if(reg == 2) reg_range <- paste0("[1,3-",as.character(reg_max),"]") + if(reg == 2) reg_range <- paste0("[1,3-",as.character(n_reg),"]") - if(reg == reg_max) reg_range <- paste0("1-",as.character(reg_max-1)) + if(reg == n_reg) reg_range <- paste0("1-",as.character(n_reg-1)) - if(reg == (reg_max-1)) reg_range <- paste0("[1-",as.character(reg_max-2),",",as.character(reg_max),"]") + if(reg == (n_reg-1)) reg_range <- paste0("[1-",as.character(n_reg-2),",",as.character(n_reg),"]") - if(reg > 2 & reg < (reg_max-1)) { + if(reg > 2 & reg < (n_reg-1)) { reg_range <- paste0("[1-",as.character(reg-1),",",as.character(reg+1),"-", - as.character(reg_max),"]") } + as.character(n_reg),"]") } # Read import value value <- as.character(data$Quantity[i]) diff --git a/Rscripts/datafeeds_code/datafeed_PIOLab_PauliukEoL.R b/Rscripts/datafeeds_code/datafeed_PIOLab_PauliukEoL.R index 6f3a677..2f8c904 100644 --- a/Rscripts/datafeeds_code/datafeed_PIOLab_PauliukEoL.R +++ b/Rscripts/datafeeds_code/datafeed_PIOLab_PauliukEoL.R @@ -11,144 +11,143 @@ if(Sys.info()[1] == "Linux"){ root_folder <- "/import/emily1/isa/IELab/Roots/PIOLab/"}else{ root_folder <- "C:/Users/hwieland/Github workspace/PIOLab/"} ################################################################################ + # Initializing R script (load R packages and set paths to folders etc.) 
source(paste0(root_folder,"Rscripts/Subroutines/InitializationR.R")) - -# Load Pauliuk data in root classification +path["df_Processed"] <- paste0(path$Processed,"/",datafeed_name) + +# Load function to write arrays to files +source(paste0(path$Subroutines,"/Numbers2File.R")) + +# Load Pauliuk data in quasi root classification source(paste0(path$Subroutines,"/Load_PauliukEoL.R")) -# Check whether NA exist and delete if so -data <- data[!is.na(data$Code),] -# Issues regarding Pauliuk EoL data: -# A number of countries are only available as aggregated regions, these are -# the former USSR (root code: 178), Former Yugoslavia (242), Czechoslovakia (56) -# Belgium-Luxembbourg. Furthermore the data set does not include Taiwan (212). -# Therefore write ALANG commands that specifically address the sum of these countries -# and add NaN for Taiwan. +# Check whether NA exist and add artificial code until later processing in this script + +data$Code[is.na(data$Code)] <- 666 + +data <- select(data,Code,Quantity) # Select only variables that are needed + +data <- group_by(data,Code) %>% summarise(Quantity = sum(Quantity)) %>% ungroup(Code) # Loading function for estimating SE with linear regression source(paste0(path$Subroutines,"/SE_LogRegression.R")) + RSE <- filter(read.xlsx(path$RSE_settings),Item == datafeed_name) -data <- SE_LogRegression(data,RSE$Minimum,RSE$Maximum) -data <- select(data,Code,Quantity,SE) -data[nrow(data)+1,] <- c(201,NaN,NaN) +data <- SE_LogRegression(data,RSE$Minimum,RSE$Maximum) # Estimate standard errors +colnames(data)[2:3] <- c("RHS","SE") + +data <- data[order(data$Code),] +index <- data.frame("Source" = 1:nrow(data), "Root" = data$Code) + +# Issues regarding Pauliuk EoL data: +# A number of countries are only available as aggregated regions, these are +# the former USSR (source code 86), Former Yugoslavia (106), Czechoslovakia (27), Belgium-Luxembbourg (8). +# Furthermore the data set does not include Taiwan (implicit in nec regions w. code 666). 
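Given the mapping described above (four aggregated source regions plus an NEC bucket for everything else, including Taiwan), a natural sanity check on the Source2Root aggregator constructed below is that every root region ends up claimed by exactly one source region. A short sketch of such a check, using only objects defined in this script, could be appended after the print statement:

# Sketch of an optional consistency check on Conco (not part of the original script)
stopifnot(all(colSums(Conco) == 1))         # each root region mapped to exactly one source region
stopifnot(sum(Conco) == nrow(root$region))  # 221 in total, matching the printed sum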
+ +# Store source and root region codes for aggregation in one list + +Reg <- list("Source" = c(86,106,27,8), + "Root" = list(c(166,65,119,117,24,205,123,73,10,14,100,209,194,102,193), + c(23,86,128,132,178,183), + c(52,182), + c(16,118) )) + +# Create concodance matrix Source2Root for Pauliuk data + +Conco <- matrix(0,nrow = nrow(data),ncol = nrow(root$region)) + +# Remove indices that are not to be aggregated: +index <- index[-nrow(index),] # NEC regions +index <- index[-Reg$Source,] # Aggregated regions + +# Write source2region codes for all regions that are not aggregated: + +Conco[as.matrix(index)] <- 1 + +Conco[Reg$Source[1],Reg$Root[[1]]] <- 1 # Former USSR countries + +Conco[Reg$Source[2],Reg$Root[[2]]] <- 1 # Former Yugoslavia + +Conco[Reg$Source[3],Reg$Root[[3]]] <- 1 # Czechoslovakia + +Conco[Reg$Source[4],Reg$Root[[4]]] <- 1 # Belgium-Luxembourg + +# Allocate the remaining root regions to source region NEC +Conco[109,colSums(Conco) == 0] <- 1 + +# Print sum of matrix which should be 221 +print(paste0("Sum of aggregator matrix = ",sum(Conco))) + +filename_RegAgg <- "/PauliukEoL_Reg_Source2Root.csv" # Define name of file + +Numbers2File(Conco,paste0(path$Concordance,filename_RegAgg)) # Save aggregator # Check if folder with processed data exists, in case delete and create empty one -path_set <- paste0(path$root,"ProcessedData/",datafeed_name) -if(dir.exists(path_set)) unlink(path_set,recursive = TRUE) -dir.create(path_set) +if(dir.exists(path$df_Processed)) unlink(path$df_Processed,recursive = TRUE) +dir.create(path$df_Processed) # Create empty ALANG table with header source(paste0(path$Subroutines,"/makeALANGheadline.R")) -for(i in 1:nrow(data)) +for(i in data$Code) { - - reg_num <- data$Code[i] # Get root_code of region - reg_name <- as.character(root$region$Name[reg_num]) # Read region name - - # Write data to processed folder - # First, value RHS - export_value <- matrix(c(data$Quantity[i],0),nrow = 1,ncol = 2) - filename_value <- paste0(datafeed_name,"_Value_",year,"_",reg_name,".csv") - write.table(export_value,row.names = FALSE,col.names = FALSE,sep = ",", - file = paste0(path_set,"/",filename_value)) - # Second, standard errors - export_SE <- matrix(c(data$SE[i],0),nrow = 2,ncol = 1) - filename_SE <- paste0(datafeed_name,"_SE_",year,"_",reg_name,".csv") - write.table(export_SE,row.names = FALSE,col.names = FALSE,sep = ",", - file = paste0(path_set,"/",filename_SE)) - - # Read extraction value - value <- paste0("DATAPATH/",datafeed_name,"/",filename_value) - # Set SE - SE <- paste0("DATAPATH/",datafeed_name,"/",filename_SE) - - if(reg_num == 166) # Former USSR + if(i == 666) { - # Codes of former Soviet countries: - aggreg <- c(166,65,119,117,24,205,123,73,10,14,100,209,194,102,193) - reg_num <- paste(aggreg,collapse = ",") - - ALANG <- add_row(ALANG,'1' = paste0("Pauliuk EoL ",reg_name),Value = value, - 'Row parent' = reg_num,'Column parent' = reg_num,S.E. = SE) - - } else if (reg_num == 218) # Former Yugoslavia - { - # Codes of countries of former Yugoslavia: - aggreg <- c(23,86,128,132,178,183) - reg_num <- paste(aggreg,collapse = ",") - - ALANG <- add_row(ALANG,'1' = paste0("Pauliuk EoL ",reg_name),Value = value, - 'Row parent' = reg_num,'Column parent' = reg_num,S.E. = SE) - - } else if (reg_num == 47) # Czechoslovakia - { - # Codes of Czechia and SLovakia: - aggreg <- c(52,182) - reg_num <- paste(aggreg,collapse = ",") - - ALANG <- add_row(ALANG,'1' = paste0("Pauliuk EoL ",reg_name),Value = value, - 'Row parent' = reg_num,'Column parent' = reg_num,S.E. 
= SE) - - } else if (reg_num == 16) # Belgium-Luxembourg - { - - aggreg <- c(16,118) - reg_num <- paste(aggreg,collapse = ",") - - ALANG <- add_row(ALANG,'1' = paste0("Pauliuk EoL ",reg_name),Value = value, - 'Row parent' = reg_num,'Column parent' = reg_num,S.E. = SE) - - } else if (reg_num == 201) # Missing Taiwan - { - # Get root_code of region - reg_num <- as.character(reg_num) - - ALANG <- add_row(ALANG,'1' = paste0("Pauliuk EoL ",reg_name),Value = value, - 'Row parent' = reg_num,'Column parent' = reg_num,S.E. = SE) - + } else { - reg_num <- as.character(reg_num) - ALANG <- add_row(ALANG,'1' = paste0("Pauliuk EoL ",reg_name),Value = value, - 'Row parent' = reg_num,'Column parent' = reg_num,S.E. = SE) + reg_name <- root$region$Name[i] # Read name of region } -} + + sel <- data[,-1] # Create copy of data array + + sel[-i,] <- NaN # Write only values of target region + + # Define filenames of arrays: + + filename_sel <- list("RHS" = paste0("/",datafeed_name,"/",datafeed_name,"_RHS_",year, + "_",reg_name,".csv"), + "SE" = paste0("/",datafeed_name,"/",datafeed_name,"_SE_",year, + "_",reg_name,".csv")) + + # Write RHS and SE data to folder: + + Numbers2File(sel$RHS,paste0(path$Processed,filename_sel$RHS)) + Numbers2File(sel$SE,paste0(path$Processed,filename_sel$SE)) -# Create industry concordance -max_ind <- length(root$industry$Code) -Concord <- matrix(0,nrow = 2, ncol = max_ind) -Concord[1,c(64,65)] <- 1 -Concord[2,c(1:63,66:max_ind)] <- 1 + ALANG <- add_row(ALANG, + '1' = paste("Pauliuk EoL",reg_name,year), + Value = paste0("DATAPATH",filename_sel$RHS), + S.E. = paste0("DATAPATH",filename_sel$SE) ) +} -# Set name and path to concordance and write to folder -Concorda_name <- "EoLPauliuk_Sec_Concordance" -Concord_path <- paste0(path$Concordance,"/",Concorda_name,".csv") -write.table(Concord,file = Concord_path,row.names = FALSE,col.names = FALSE,sep = ",") +# Add other variables +ALANG$`#` <- as.character(1:nrow(ALANG)) -# Add path to concordance to ALANG commands -ALANG$`Column grandchild` <- paste0("1:e t2 CONCPATH/",Concorda_name,".csv") +ALANG$Years <- "1" +ALANG$Margin <- "1" +ALANG$Coef1 <- "1" -# Add other variables +ALANG$`Row parent` <- "1:e" ALANG$`Row child` <- "3" ALANG$`Row grandchild` <- "2" + +ALANG$`Column parent` <- paste0("1:e~3 a CONCPATH",filename_RegAgg) ALANG$`Column child` <- "1" -ALANG$`#` <- as.character(1:nrow(ALANG)) +ALANG$`Column grandchild` <- "64-65" + ALANG$Incl <- "Y" ALANG$Parts <- "1" ALANG$`Pre-map` <- "" ALANG$`Post-map` <- "" ALANG$`Pre-Map` <- "" ALANG$`Post-Map` <- "" -ALANG$Years <- "1" -ALANG$Margin <- "1" -ALANG$Coef1 <- "1" # Call script that writes the ALANG file to the respective folder in the root source(paste0(path$root,"Rscripts/datafeeds_code/datafeed_subroutines/WriteALANG2Folder.R")) print(paste0("datafeed_PIOLab_",datafeed_name," finished.")) - \ No newline at end of file + diff --git a/Rscripts/datafeeds_code/datafeed_PIOLab_PrimaryInputEqualsFinalUse.R b/Rscripts/datafeeds_code/datafeed_PIOLab_PrimaryInputEqualsFinalUse.R index b54283c..34bcff6 100644 --- a/Rscripts/datafeeds_code/datafeed_PIOLab_PrimaryInputEqualsFinalUse.R +++ b/Rscripts/datafeeds_code/datafeed_PIOLab_PrimaryInputEqualsFinalUse.R @@ -13,31 +13,51 @@ if(Sys.info()[1] == "Linux"){ root_folder <- "/import/emily1/isa/IELab/Roots/PIOLab/"}else{ root_folder <- "C:/Users/hwieland/Github workspace/PIOLab/"} ################################################################################ + # Initializing R script (load R packages and set paths to folders
etc.) source(paste0(root_folder,"Rscripts/Subroutines/InitializationR.R")) # Read global extraction and EOL values source(paste0(path$Subroutines,"/Read_ExtractionIRP.R")) DE <- sum(data$Quantity) + # For now just this (assume per ton of iron ore 1.3 ton of flux, air and coke are required) # Set SE RSE <- filter(read.xlsx(path$RSE_settings),Item == datafeed_name) tot <- DE + DE*1.3 SE <- as.character(round(tot*RSE$Minimum)) + # Create empty ALANG table with header source(paste0(path$Subroutines,"/makeALANGheadline.R")) + # Extend table with additional columns ALANG <- ALANG[,c(1:19,11:19)] -ALANG <- add_row(ALANG,'1' = "Sum of primary inputs equals sum of final use", - Coef1 = "1",'Row parent' = "1-e",'Row child' = "3",'Row grandchild' = "1-e", - 'Column parent' = "1-e",'Column child' = "1",'Column grandchild' = "1-e", - 'Coef1.1' = "-1",'Row parent.1' = "1-e",'Row child.1' = "1-2",'Row grandchild.1' = "1-e", - 'Column parent.1' = "1-e",'Column child.1' = "3",'Column grandchild.1' = "1-e") +ALANG <- add_row(ALANG,'1' = "Sum of primary inputs equals sum of final use") + + + +# Part 1: Sum over primary inputs +ALANG$Coef1 <- "1" +ALANG$`Row parent` <- "1-e" +ALANG$`Row child` <- "3" +ALANG$`Row grandchild` <- "1-e" +ALANG$`Column parent` <- "1-e" +ALANG$`Column child` <- "1" +ALANG$`Column grandchild` <- "1-e" +# Part 2: Sum over final use +ALANG$Coef1.1 <- "-1" +ALANG$`Row parent.1` <- "1-e" +ALANG$`Row child.1` <- "1-2" +ALANG$`Row grandchild.1` <- "1-e" +ALANG$`Column parent.1` <- "1-e" +ALANG$`Column child.1` <- "3" +ALANG$`Column grandchild.1` <- "1-e" + # Add other variables -ALANG$`#` <- as.character(1:nrow(ALANG)) +ALANG$`#` <- "1" ALANG$Incl <- "Y" ALANG$Parts <- "2" ALANG$Value <- "0" diff --git a/Rscripts/datafeeds_code/datafeed_PIOLab_WSABOFsteel.R b/Rscripts/datafeeds_code/datafeed_PIOLab_WSABOFsteel.R index 52c1408..b544757 100644 --- a/Rscripts/datafeeds_code/datafeed_PIOLab_WSABOFsteel.R +++ b/Rscripts/datafeeds_code/datafeed_PIOLab_WSABOFsteel.R @@ -11,24 +11,14 @@ if(Sys.info()[1] == "Linux"){ root_folder <- "/import/emily1/isa/IELab/Roots/PIOLab/"}else{ root_folder <- "C:/Users/hwieland/Github workspace/PIOLab/"} ################################################################################ + # Initializing R script (load R packages and set paths to folders etc.) 
source(paste0(root_folder,"Rscripts/Subroutines/InitializationR.R")) -# Long rolled products have the item code 7 in WSA data -item_id <- 4 - -# Get relative standard error for smallest and largest values in the data set -RSE <- filter(read.xlsx(path$RSE_settings),Item == datafeed_name) +path["df_Processed"] <- paste0(path$Processed,"/",datafeed_name) # Add path for processed data -# Set range of products and industries to be adressed by this feed -Grandchild <- list("RoW" = "20","Column" = "30") - -# Load function and create ALANG commands +# Run code for ALANG commands source(paste0(path$root,"Rscripts/datafeeds_code/datafeed_subroutines/CreateALANGforWSAdata.R")) -ALANG <- CreateALANGforWSAdata(item_id,RSE,Grandchild,datafeed_name) - -# Call script that writes the ALANG file to the respective folder in the root -source(paste0(path$root,"Rscripts/datafeeds_code/datafeed_subroutines/WriteALANG2Folder.R")) print(paste0("datafeed_PIOLab_",datafeed_name," finished.")) \ No newline at end of file diff --git a/Rscripts/datafeeds_code/datafeed_subroutines/CreateALANGforWSAdata.R b/Rscripts/datafeeds_code/datafeed_subroutines/CreateALANGforWSAdata.R index ff374aa..06be0f2 100644 --- a/Rscripts/datafeeds_code/datafeed_subroutines/CreateALANGforWSAdata.R +++ b/Rscripts/datafeeds_code/datafeed_subroutines/CreateALANGforWSAdata.R @@ -1,56 +1,94 @@ ################################################################################ # This is a subfunction for the data feeds that process the World Steel Association data -CreateALANGforWSAdata <- function(item_id,RSE,Grandchild,datafeed_name) -{ - # Load specific yearbook - source(paste0(path$Subroutines,"/Load_YearbookWSA.R")) - # Load function to align data with root classification - source(paste0(path$Subroutines,"/Read_ProductionWSA.R")) - # Select Long Rolled Products - item_page <- items[[item_id]]$page - # Read values and align with root classification - data <- Read_ProductionWSA(path,year,item_page,yb,concord) - - # Loading function for estimating SE with linear regression - source(paste0(path$Subroutines,"/SE_LogRegression.R")) - data <- SE_LogRegression(data,RSE$Minimum,RSE$Maximum) +# Check if folder with processed data exists, in case delete and create empty one +if(dir.exists(path$df_Processed)) unlink(path$df_Processed,recursive = TRUE) +dir.create(path$df_Processed) + +# Load function to write arrays to files +source(paste0(path$Subroutines,"/Numbers2File.R")) + +# Get relative standard error for smallest and largest values in the data set +RSE <- filter(read.xlsx(path$RSE_settings),Item == datafeed_name) + +# Long data feed settings for WSA accounts +Settings <- read.xlsx(paste0(path$Settings,"/datafeeds_settings/WSA_settings.xlsx")) + +Settings <- filter(Settings,datafeed_name == datafeed_name) + +# Set range of products and industries to be adressed by this feed +Grandchild <- list("RoW" = as.character(Settings$Row),"Column" = as.character(Settings$Col)) + +# Load specific yearbook +source(paste0(path$Subroutines,"/Load_YearbookWSA.R")) + +# Load function to align data with root classification +source(paste0(path$Subroutines,"/Read_ProductionWSA.R")) + +# Read page number (WSA Yearbook) from id +item_page <- items[[Settings$id]]$page + +# Read values and align with root classification +data <- Read_ProductionWSA(path,year,item_page,yb,concord) - # Create empty ALANG table with header - source(paste0(path$Subroutines,"/makeALANGheadline.R")) - # Extend table with additional columns +# Loading function for estimating SE with linear 
regression +source(paste0(path$Subroutines,"/SE_LogRegression.R")) +data <- SE_LogRegression(data,RSE$Minimum,RSE$Maximum) - for(i in 1:nrow(data)) - { - # Get root_code of region - reg_num <- data$Code[i] - reg_name <- as.character(root$region$Name[reg_num]) - reg_num <- as.character(reg_num) - # Read extraction value - value <- as.character(data$Quantity[i]) - # Set SE - SE <- as.character(data$SE[i]) - - # Add command - ALANG <- add_row(ALANG,'1' = paste(datafeed_name,reg_name), - Value = value,'Row parent' = reg_num,'Column parent' = reg_num,S.E. = SE) - } - - # Add other variables - ALANG$`Column child` <- "2" - ALANG$`Column grandchild` <- Grandchild$Column - ALANG$`Row child` <- "1" - ALANG$`Row grandchild` <- Grandchild$RoW - ALANG$`#` <- as.character(1:nrow(ALANG)) - ALANG$Incl <- "Y" - ALANG$Parts <- "1" - ALANG$`Pre-map` <- "" - ALANG$`Post-map` <- "" - ALANG$`Pre-Map` <- "" - ALANG$`Post-Map` <- "" - ALANG$Years <- "1" - ALANG$Margin <- "1" - ALANG$Coef1 <- "1" +n_reg <- nrow(root$region) # Number of regions in root + +# Create data frame to have complete list of root regions for writing to file +df <- data.frame("RHS" = rep(NaN,n_reg), "SE" = rep(NaN,n_reg)) +df$RHS[data$Code] <- data$Quantity +df$SE[data$Code] <- data$SE + +# Create empty ALANG table with header +source(paste0(path$Subroutines,"/makeALANGheadline.R")) +# Extend table with additional columns - return(ALANG) -} \ No newline at end of file +for(i in data$Code) +{ + reg_name <- as.character(root$region$Name[i]) # Name of region + + sel <- df + sel[-i,] <- NaN + + filename <- list("RHS" = paste0("/",datafeed_name,"/",datafeed_name,"_RHS_",year, + "_",root$region$RootCountryAbbreviation[i],".csv"), + "SE" = paste0("/",datafeed_name,"/",datafeed_name,"_SE_",year, + "_",root$region$RootCountryAbbreviation[i],".csv")) + + # Write RHS and SE data to folder + Numbers2File( sel$RHS, paste0(path$Processed,filename$RHS) ) + Numbers2File( sel$SE, paste0(path$Processed,filename$SE) ) + + # Add command + ALANG <- add_row(ALANG,'1' = paste( Settings$WSA_name, reg_name, year ), + Value = paste0( "DATAPATH", filename$RHS ), + S.E. 
= paste0( "DATAPATH", filename$SE ) ) +} + +# Add other variables +ALANG$`#` <- as.character(1:nrow(ALANG)) +ALANG$Incl <- "Y" +ALANG$Parts <- "1" + +ALANG$Years <- "1" +ALANG$Margin <- "1" +ALANG$Coef1 <- "1" + +ALANG$`Row parent` <- "1:e" +ALANG$`Row child` <- "1" +ALANG$`Row grandchild` <- Grandchild$RoW + +ALANG$`Column parent` <- "1:e~3" +ALANG$`Column child` <- "2" +ALANG$`Column grandchild` <- Grandchild$Column + +ALANG$`Pre-map` <- "" +ALANG$`Post-map` <- "" +ALANG$`Pre-Map` <- "" +ALANG$`Post-Map` <- "" + +# Call script that writes the ALANG file to the respective folder in the root +source(paste0(path$root,"Rscripts/datafeeds_code/datafeed_subroutines/WriteALANG2Folder.R")) diff --git a/Rscripts/datafeeds_code/datafeed_subroutines/WriteALANG2Folder.R b/Rscripts/datafeeds_code/datafeed_subroutines/WriteALANG2Folder.R index de22c5b..82a5caf 100644 --- a/Rscripts/datafeeds_code/datafeed_subroutines/WriteALANG2Folder.R +++ b/Rscripts/datafeeds_code/datafeed_subroutines/WriteALANG2Folder.R @@ -14,4 +14,5 @@ dir.create(path_set) filename <- paste0(path_set,"/",gsub("-","",Sys.Date()), "_PIOLab_",datafeed_name,"_000_Constraints-",year,"_000_RoWincluded.txt") print(filename) + write.table(ALANG,file = filename,row.names = FALSE, quote = F,sep = "\t") diff --git a/Settings/20190112_PIOLab_ALL_ALL_ALANGProxyLibrary.xlsx b/Settings/20190112_PIOLab_ALL_ALL_ALANGProxyLibrary.xlsx new file mode 100644 index 0000000..745981c Binary files /dev/null and b/Settings/20190112_PIOLab_ALL_ALL_ALANGProxyLibrary.xlsx differ diff --git a/Settings/20190112_PIOLab_ALL_ALL_SatelliteDefinitions.xlsx b/Settings/20190112_PIOLab_ALL_ALL_SatelliteDefinitions.xlsx new file mode 100644 index 0000000..ea0e193 Binary files /dev/null and b/Settings/20190112_PIOLab_ALL_ALL_SatelliteDefinitions.xlsx differ diff --git a/Settings/20190112_PIOLab_ALL_ALL_SatelliteDescriptorLibrary.xlsx b/Settings/20190112_PIOLab_ALL_ALL_SatelliteDescriptorLibrary.xlsx new file mode 100644 index 0000000..2804d63 Binary files /dev/null and b/Settings/20190112_PIOLab_ALL_ALL_SatelliteDescriptorLibrary.xlsx differ diff --git a/Settings/20190112_PIOLab_ALL_ALL_SatelliteIndex.xlsx b/Settings/20190112_PIOLab_ALL_ALL_SatelliteIndex.xlsx new file mode 100644 index 0000000..f07b031 Binary files /dev/null and b/Settings/20190112_PIOLab_ALL_ALL_SatelliteIndex.xlsx differ diff --git a/Settings/PIOLab_settings.txt b/Settings/PIOLab_settings.txt new file mode 100644 index 0000000..b62519e --- /dev/null +++ b/Settings/PIOLab_settings.txt @@ -0,0 +1,65 @@ +Settings File for the PIOLab +(c) Arne Geschke, 12th Nov 2019, arne.geschke@sydney.edu.au + +If you add a numeric flag that is not in already handled in makenumeric, you MUST insert this value in makenumeric + +root.clearname=Physical Input-Output Lab +root.shortname=PIOLab + +Default Concordances (only the filenames, no paths): +regionagg_default=none +sectoragg_default=none +sectoraggprod_default=none + +Year for which the IE data was written +root.baseyear=2008 + +Number of Markups in this root + +root.NMarkups=1 +root.NRegions=221 + +Admin Email for this root + +admin_email=hanspeter.wieland@wu.ac.at + +Specify, how the RoW blocks are generated +three options: +fixed: remains the same +variable: same as in Base MRIO +NoRoW: RoW cannot be added (used for global tables) +This one used to be imports variable and exports fixed + +root.RoW_Imports_type=NoRoW +root.RoW_Exports_type=NoRoW + +if imports or exports RoW is fixed, define NSectors here + +root.RoW_Exports_NSectors=1 + +Specify, if small values 
should be deleted in this root and if yes, what the threshold is + +root.postprocess_deletesmallvalues=yes +root.postprocess_deletesmallvalues_bp_lowerboundary=-0.1 +root.postprocess_deletesmallvalues_bp_upperboundary=0.1 +root.postprocess_deletesmallvalues_margins_lowerboundary=-0.1 +root.postprocess_deletesmallvalues_margins_upperboundary=0.1 + +Root table structure set up +Homegeneous and heterogeneous table structures can be supported. For each of these two selections, the root can allow for IIOT, CIOT, SUT, or mixed tables + +root.tablestructure=homogeneous +root.regiontype=SUT +root.indprodidentical=0 + +Specify if this root needs to use adjustentities. This is only the case if mixed SUT/IIOT/CIOT structures are allowed and intended under this root +0 means no, 1 means yes +if tablestructure and tabletype are set, then adjustentities will be set accordingly + +flags.useadjustentities=0 + +Additional toolboxes + +Define the visibility of the root + +root.visibility=geschke,fry,manni,hwie3321 diff --git a/Settings/datafeeds_settings/.directory b/Settings/datafeeds_settings/.directory new file mode 100644 index 0000000..957be93 --- /dev/null +++ b/Settings/datafeeds_settings/.directory @@ -0,0 +1,7 @@ +[Dolphin] +AdditionalInfo=3 +Timestamp=2020,1,9,21,47,26 +ViewMode=1 + +[Settings] +ShowDotFiles=true diff --git a/Settings/datafeeds_settings/WSA_settings.xlsx b/Settings/datafeeds_settings/WSA_settings.xlsx new file mode 100644 index 0000000..817518c Binary files /dev/null and b/Settings/datafeeds_settings/WSA_settings.xlsx differ diff --git a/datafeeds_code/datafeed_PIOLab_BACI.m b/datafeeds_code/datafeed_PIOLab_BACI.m new file mode 100644 index 0000000..33f60f2 --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_BACI.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_BACI(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_BACI.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_IRPexports.m b/datafeeds_code/datafeed_PIOLab_IRPexports.m new file mode 100644 index 0000000..468137a --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_IRPexports.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_IRPexports(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_IRPexports.R','-echo'); + +end \ No newline at end of file diff --git a/datafeeds_code/datafeed_PIOLab_IRPextraction.m b/datafeeds_code/datafeed_PIOLab_IRPextraction.m new file mode 100644 index 0000000..a52406f --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_IRPextraction.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_IRPextraction(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_IRPextraction.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_IRPimports.m b/datafeeds_code/datafeed_PIOLab_IRPimports.m new file mode 100644 index 0000000..7ec22a9 --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_IRPimports.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_IRPimports(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_IRPimports.R','-echo'); + +end \ No newline at end of file diff --git a/datafeeds_code/datafeed_PIOLab_KrausmannTotalsEoL.m b/datafeeds_code/datafeed_PIOLab_KrausmannTotalsEoL.m new file mode 100644 index 0000000..f92a05e --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_KrausmannTotalsEoL.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_KrausmannTotalsEoL(handles) + +system('Rscript 
/import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_KrausmannTotalsEoL.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_PauliukEoL.m b/datafeeds_code/datafeed_PIOLab_PauliukEoL.m new file mode 100644 index 0000000..f26306b --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_PauliukEoL.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_PauliukEoL(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_PauliukEoL.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_PrimaryInputEqualsFinalUse.m b/datafeeds_code/datafeed_PIOLab_PrimaryInputEqualsFinalUse.m new file mode 100644 index 0000000..fa57a36 --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_PrimaryInputEqualsFinalUse.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_PrimaryInputEqualsFinalUse(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_PrimaryInputEqualsFinalUse.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_RatioConstraints.m b/datafeeds_code/datafeed_PIOLab_RatioConstraints.m new file mode 100644 index 0000000..bceefe7 --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_RatioConstraints.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_RatioConstraints(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_RatioConstraints.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_WSABOFsteel.m b/datafeeds_code/datafeed_PIOLab_WSABOFsteel.m new file mode 100644 index 0000000..41a6e3b --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_WSABOFsteel.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_WSABOFsteel(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_WSABOFsteel.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_WSAEAFsteel.m b/datafeeds_code/datafeed_PIOLab_WSAEAFsteel.m new file mode 100644 index 0000000..c6f647d --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_WSAEAFsteel.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_WSAEAFsteel(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_WSAEAFsteel.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_WSAFlatRolledProducts.m b/datafeeds_code/datafeed_PIOLab_WSAFlatRolledProducts.m new file mode 100644 index 0000000..ff03497 --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_WSAFlatRolledProducts.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_WSAFlatRolledProducts(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_WSAFlatRolledProducts.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_WSAIngots.m b/datafeeds_code/datafeed_PIOLab_WSAIngots.m new file mode 100644 index 0000000..d5cdfd0 --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_WSAIngots.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_WSAIngots(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_WSAIngots.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_WSALongRolledProducts.m b/datafeeds_code/datafeed_PIOLab_WSALongRolledProducts.m new file mode 100644 index 0000000..b2971be --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_WSALongRolledProducts.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_WSALongRolledProducts(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_WSALongRolledProducts.R','-echo'); + +end diff 
--git a/datafeeds_code/datafeed_PIOLab_WSAOHFsteel.m b/datafeeds_code/datafeed_PIOLab_WSAOHFsteel.m new file mode 100644 index 0000000..3b192ed --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_WSAOHFsteel.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_WSAOHFsteel(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_WSAOHFsteel.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_WSAPigIron.m b/datafeeds_code/datafeed_PIOLab_WSAPigIron.m new file mode 100644 index 0000000..41b8cba --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_WSAPigIron.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_WSAPigIron(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_WSAPigIron.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_WSASpongeIron.m b/datafeeds_code/datafeed_PIOLab_WSASpongeIron.m new file mode 100644 index 0000000..f3c40ee --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_WSASpongeIron.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_WSASpongeIron(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_WSASpongeIron.R','-echo'); + +end diff --git a/datafeeds_code/datafeed_PIOLab_balancing.m b/datafeeds_code/datafeed_PIOLab_balancing.m new file mode 100644 index 0000000..19965da --- /dev/null +++ b/datafeeds_code/datafeed_PIOLab_balancing.m @@ -0,0 +1,5 @@ +function datafeed_PIOLab_balancing(handles) + +system('Rscript /import/emily1/isa/IELab/Roots/PIOLab/Rscripts/datafeeds_code/datafeed_PIOLab_balancing.R','-echo'); + +end
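With the yearbook item id, row range and column range now read from Settings/datafeeds_settings/WSA_settings.xlsx inside CreateALANGforWSAdata.R, the per-product WSA feed scripts reduce to thin wrappers like the reworked datafeed_PIOLab_WSABOFsteel.R above. Assuming the remaining WSA feeds (EAF steel, pig iron, ingots, rolled products, sponge iron, OHF steel) follow the same pattern, each R script would presumably look roughly like this sketch (feed name illustrative, content not part of this diff):

# Hypothetical Rscripts/datafeeds_code/datafeed_PIOLab_WSAPigIron.R under the new pattern
datafeed_name <- "WSAPigIron"
print(paste0("datafeed_PIOLab_",datafeed_name," initiated."))

################################################################################
if(Sys.info()[1] == "Linux"){
  root_folder <- "/import/emily1/isa/IELab/Roots/PIOLab/"}else{
  root_folder <- "C:/Users/hwieland/Github workspace/PIOLab/"}
################################################################################

# Initializing R script (load R packages and set paths to folders etc.)
source(paste0(root_folder,"Rscripts/Subroutines/InitializationR.R"))

path["df_Processed"] <- paste0(path$Processed,"/",datafeed_name) # Add path for processed data

# Run code for ALANG commands (reads settings from WSA_settings.xlsx and writes the ALANG file)
source(paste0(path$root,"Rscripts/datafeeds_code/datafeed_subroutines/CreateALANGforWSAdata.R"))

print(paste0("datafeed_PIOLab_",datafeed_name," finished."))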